From 1f3ea93742e9edc4fca8d8b9d7d2c9e9039966cf Mon Sep 17 00:00:00 2001
From: Wei Liu
Date: Fri, 20 Sep 2024 12:04:49 -0700
Subject: [PATCH] lint and minor, need to think about offset log

---
 .../spark/sql/connect/planner/SparkConnectPlanner.scala | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/sql/connect/server/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala b/sql/connect/server/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala
index f96aa8420837b..381be09410f6b 100644
--- a/sql/connect/server/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala
+++ b/sql/connect/server/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala
@@ -1195,7 +1195,8 @@ class SparkConnectPlanner(
   }
 
   private def groupColsFromDropDuplicates(
-      colNames: Seq[String], allColumns: Seq[Attribute]): Seq[Attribute] = {
+      colNames: Seq[String],
+      allColumns: Seq[Attribute]): Seq[Attribute] = {
     val resolver = session.sessionState.analyzer.resolver
     // [SPARK-31990][SPARK-49722]: We must keep `toSet.toSeq` here because of the backward
     // compatibility issue (the Streaming's state store depends on the `groupCols` order).
@@ -1226,11 +1227,11 @@ class SparkConnectPlanner(
     val resolver = session.sessionState.analyzer.resolver
     val allColumns = queryExecution.analyzed.output
     if (rel.getAllColumnsAsKeys) {
-      val groupCals = groupColsFromDropDuplicates(allColumns.map(_.name), allColumns)
+      val groupCols = groupColsFromDropDuplicates(allColumns.map(_.name), allColumns)
       if (rel.getWithinWatermark) {
-        DeduplicateWithinWatermark(groupCals, queryExecution.analyzed)
+        DeduplicateWithinWatermark(groupCols, queryExecution.analyzed)
       } else {
-        Deduplicate(groupCals, queryExecution.analyzed)
+        Deduplicate(groupCols, queryExecution.analyzed)
       }
     } else {
       val toGroupColumnNames = rel.getColumnNamesList.asScala.toSeq
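
Reviewer note: the hunks above show only the signature of groupColsFromDropDuplicates and the
ordering comment, so below is a minimal, self-contained Scala sketch of the resolution logic for
context. `Attribute` and `resolver` are simplified stand-ins for Catalyst's Attribute and the
analyzer's resolver, and the method body is an assumption for illustration, not the actual Spark
implementation.

// A minimal sketch, assuming simplified stand-ins for Spark internals:
// `Attribute` is a plain case class here (Catalyst's Attribute in the real
// code) and `resolver` mimics the analyzer's default case-insensitive
// resolution. The method body is illustrative; only the signature and the
// ordering constraint come from the patch.
object GroupColsSketch {
  final case class Attribute(name: String) // stand-in for Catalyst's Attribute

  // Stand-in for session.sessionState.analyzer.resolver.
  val resolver: (String, String) => Boolean = (a, b) => a.equalsIgnoreCase(b)

  def groupColsFromDropDuplicates(
      colNames: Seq[String],
      allColumns: Seq[Attribute]): Seq[Attribute] = {
    // `toSet.toSeq` removes duplicate names. Its iteration order is not
    // insertion order in general, but it is deterministic for a given input,
    // and the streaming state store depends on exactly that order
    // ([SPARK-31990][SPARK-49722]); changing it (e.g. to `colNames.distinct`,
    // which orders by first occurrence) could silently change the key order
    // of existing checkpoints.
    colNames.toSet.toSeq.flatMap { colName =>
      // filter rather than find: several output columns may share a name.
      allColumns.filter(col => resolver(col.name, colName))
    }
  }

  def main(args: Array[String]): Unit = {
    val output = Seq(Attribute("id"), Attribute("ts"), Attribute("value"))
    // Repeated key names collapse to a single grouping attribute.
    println(groupColsFromDropDuplicates(Seq("id", "id", "ts"), output))
    // => List(Attribute(id), Attribute(ts))
  }
}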