
Searched refs:queryExecution (Results 1 – 25 of 91) sorted by relevance


/dports/devel/spark/spark-2.1.1/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/
KafkaWriter.scala
47 queryExecution: QueryExecution,
50 val schema = queryExecution.logical.output
84 queryExecution: QueryExecution,
87 val schema = queryExecution.logical.output
88 validateQuery(queryExecution, kafkaParameters, topic)
89 SQLExecution.withNewExecutionId(sparkSession, queryExecution) {
90 queryExecution.toRdd.foreachPartition { iter =>
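
The KafkaWriter entries above show the canonical write path: validate the query, then run its RDD inside a tracked execution so it is reported to the SQL UI. A minimal sketch of that pattern, assuming a Spark 2.1.x classpath; processPartition is a hypothetical sink function, and SQLExecution may be package-private to org.apache.spark.sql in this version, so a helper like this would live under that package:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.catalyst.InternalRow
    import org.apache.spark.sql.execution.{QueryExecution, SQLExecution}

    // Run the query's RDD inside a named execution so it appears in the SQL UI.
    def writeOut(
        sparkSession: SparkSession,
        queryExecution: QueryExecution)(
        processPartition: Iterator[InternalRow] => Unit): Unit = {
      SQLExecution.withNewExecutionId(sparkSession, queryExecution) {
        queryExecution.toRdd.foreachPartition(processPartition)
      }
    }
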
/dports/devel/spark/spark-2.1.1/sql/core/src/test/scala/org/apache/spark/sql/execution/
WholeStageCodegenSuite.scala
32 val plan = df.queryExecution.executedPlan
39 val plan = df.queryExecution.executedPlan
48 val plan = df.queryExecution.executedPlan
60 assert(df.queryExecution.executedPlan.find(p =>
68 val plan = df.queryExecution.executedPlan
79 val plan = ds.queryExecution.executedPlan
88 val plan = ds.queryExecution.executedPlan
97 val plan = ds.queryExecution.executedPlan
110 val plan = ds.queryExecution.executedPlan
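
Every match in WholeStageCodegenSuite fetches df.queryExecution.executedPlan and searches it for a codegen stage. A minimal, self-contained sketch of that check, assuming a local Spark 2.1.x session (names are illustrative):

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.execution.WholeStageCodegenExec

    object CodegenCheck {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().master("local[*]").appName("codegen-check").getOrCreate()
        val df = spark.range(0, 100).selectExpr("id", "id * 2 AS doubled")

        // find traverses the physical plan tree looking for a codegen node.
        val plan = df.queryExecution.executedPlan
        val codegenned = plan.find(_.isInstanceOf[WholeStageCodegenExec]).isDefined
        println(s"whole-stage codegen applied: $codegenned")
        spark.stop()
      }
    }
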
SparkPlanTest.scala
147 val outputPlan = planFunction(input.queryExecution.sparkPlan)
148 val expectedOutputPlan = expectedPlanFunction(input.queryExecution.sparkPlan)
208 val outputPlan = planFunction(input.map(_.queryExecution.sparkPlan))
OptimizeMetadataOnlyQuerySuite.scala
44 val localRelations = df.queryExecution.optimizedPlan.collect {
51 val localRelations = df.queryExecution.optimizedPlan.collect {
/dports/devel/spark/spark-2.1.1/sql/core/src/main/scala/org/apache/spark/sql/execution/command/
commands.scala
103 val queryExecution =
113 codegenString(queryExecution.executedPlan)
115 queryExecution.toString
117 queryExecution.simpleString
127 queryExecution: IncrementalExecution,
137 queryExecution.toString
139 queryExecution.simpleString
/dports/devel/spark/spark-2.1.1/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/
PartitionBatchPruningSuite.scala
153 val (readPartitions, readBatches) = df.queryExecution.sparkPlan.collect {
168 val queryExecution = df.queryExecution
170 assertResult(expectedQueryResult.toArray, s"Wrong query result: $queryExecution") {
174 val (readPartitions, readBatches) = df.queryExecution.sparkPlan.collect {
178 assert(readBatches === expectedReadBatches, s"Wrong number of read batches: $queryExecution")
181 s"Wrong number of read partitions: $queryExecution")
/dports/devel/spark/spark-2.1.1/sql/core/src/test/scala/org/apache/spark/sql/sources/
PrunedScanSuite.scala
127 val queryExecution = sql(sqlString).queryExecution
128 val rawPlan = queryExecution.executedPlan.collect {
132 case _ => fail(s"More than one PhysicalRDD found\n$queryExecution")
141 queryExecution)
145 fail(s"Wrong output row. Got $rawOutput\n$queryExecution")
FilteredScanSuite.scala
315 val queryExecution = sql(sqlString).queryExecution
316 val rawPlan = queryExecution.executedPlan.collect {
320 case _ => fail(s"More than one PhysicalRDD found\n$queryExecution")
326 val relation = table.queryExecution.logical.collectFirst {
337 queryExecution)
/dports/devel/spark/spark-2.1.1/sql/core/src/main/scala/org/apache/spark/sql/
Dataset.scala
161 queryExecution.assertAnalyzed()
181 queryExecution.analyzed match {
185 LogicalRDD(queryExecution.analyzed.output, queryExecution.toRdd)(sparkSession)
187 LogicalRDD(queryExecution.analyzed.output, queryExecution.toRdd)(sparkSession)
189 queryExecution.analyzed
519 val physicalPlan = queryExecution.executedPlan
840 .queryExecution.analyzed.asInstanceOf[Join]
1872 val output = queryExecution.analyzed.output
1893 val output = queryExecution.analyzed.output
1918 val output = queryExecution.analyzed.output
[all …]
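
Dataset.scala reads its QueryExecution at several stages: analyzed for resolved output attributes, executedPlan for the physical plan, and toRdd for the backing RDD. A sketch of those stages, assuming an already-built DataFrame (the helper name is hypothetical):

    import org.apache.spark.sql.DataFrame

    // Print each plan stage a QueryExecution exposes, from parse to physical.
    def describeStages(df: DataFrame): Unit = {
      val qe = df.queryExecution
      qe.assertAnalyzed()  // fail fast on analysis errors, as Dataset.scala:161 does
      println(s"logical:   ${qe.logical.simpleString}")
      println(s"analyzed:  ${qe.analyzed.simpleString}")
      println(s"optimized: ${qe.optimizedPlan.simpleString}")
      println(s"physical:  ${qe.executedPlan.simpleString}")
    }
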
KeyValueGroupedDataset.scala
43 @transient val queryExecution: QueryExecution,
51 private def logicalPlan = queryExecution.analyzed
52 private def sparkSession = queryExecution.sparkSession
65 queryExecution,
/dports/devel/spark/spark-2.1.1/sql/core/src/test/scala/org/apache/spark/sql/
DatasetBenchmark.scala
59 res.queryExecution.toRdd.foreach(_ => Unit)
69 res.queryExecution.toRdd.foreach(_ => Unit)
103 res.queryExecution.toRdd.foreach(_ => Unit)
113 res.queryExecution.toRdd.foreach(_ => Unit)
145 df.select(sum($"l")).queryExecution.toRdd.foreach(_ => Unit)
149 df.as[Data].select(typed.sumLong((d: Data) => d.l)).queryExecution.toRdd.foreach(_ => Unit)
153 df.as[Data].select(ComplexAggregator.toColumn).queryExecution.toRdd.foreach(_ => Unit)
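
The benchmark lines use queryExecution.toRdd.foreach(_ => Unit) to force full execution without collecting rows to the driver (note that _ => Unit returns the Unit companion object; _ => () is the idiomatic spelling). A sketch of the same trick:

    import org.apache.spark.sql.DataFrame

    // Materialize the whole query while discarding every row on the executors.
    def forceExecution(df: DataFrame): Unit =
      df.queryExecution.toRdd.foreach(_ => ())
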
StatisticsCollectionSuite.scala
44 val stats = df.queryExecution.analyzed.collect { case rel: LogicalRelation =>
60 val sizes = df.queryExecution.analyzed.collect { case g: Join =>
108 assert(df.queryExecution.analyzed.statistics.sizeInBytes >
110 assert(df.selectExpr("a").queryExecution.analyzed.statistics.sizeInBytes >
121 val sizesGlobalLimit = df.queryExecution.analyzed.collect { case g: GlobalLimit =>
128 val sizesLocalLimit = df.queryExecution.analyzed.collect { case l: LocalLimit =>
252 val stats = spark.table("ds_tbl").queryExecution.optimizedPlan.statistics
258 val stats2 = spark.table("hive_tbl").queryExecution.optimizedPlan.statistics
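
These tests read Catalyst's size estimates straight off the plan. A minimal sketch of that access path, assuming Spark 2.1.x, where statistics is still a method on LogicalPlan:

    import org.apache.spark.sql.DataFrame

    // The optimizer's estimated output size in bytes for this query.
    def estimatedSize(df: DataFrame): BigInt =
      df.queryExecution.optimizedPlan.statistics.sizeInBytes
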
DataFrameSuite.scala
89 assert(unionDF.queryExecution.analyzed.collect {
1275 df.queryExecution.executedPlan.foreach {
1290 df.queryExecution.executedPlan.foreach {
1474 val plan = join.queryExecution.executedPlan
1477 join.queryExecution.executedPlan.collect { case e: ShuffleExchange => true }.size === 1)
1486 join2.queryExecution.executedPlan
1498 assert(agg1.queryExecution.executedPlan.sameResult(agg2.queryExecution.executedPlan))
1500 assert(!agg1.queryExecution.executedPlan.sameResult(agg3.queryExecution.executedPlan))
1503 assert(!agg1.queryExecution.executedPlan.sameResult(agg4.queryExecution.executedPlan))
1644 dfWithFilter.queryExecution.optimizedPlan.collect {
[all …]
QueryTest.scala
217 val planWithCaching = query.queryExecution.withCachedData
232 assert(query.queryExecution.analyzed.missingInput.isEmpty,
234 assert(query.queryExecution.optimizedPlan.missingInput.isEmpty,
236 assert(query.queryExecution.executedPlan.missingInput.isEmpty,
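
QueryTest checks the invariant that no plan stage references attributes its children do not produce. A sketch of that check (the helper name is hypothetical):

    import org.apache.spark.sql.DataFrame

    // missingInput is the set of attributes a plan references but no child produces.
    def assertNoMissingInput(df: DataFrame): Unit = {
      val qe = df.queryExecution
      assert(qe.analyzed.missingInput.isEmpty, s"analyzed plan: ${qe.analyzed}")
      assert(qe.optimizedPlan.missingInput.isEmpty, s"optimized plan: ${qe.optimizedPlan}")
      assert(qe.executedPlan.missingInput.isEmpty, s"executed plan: ${qe.executedPlan}")
    }
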
/dports/devel/spark/spark-2.1.1/sql/hive/src/test/scala/org/apache/spark/sql/hive/
parquetSuites.scala
195 sql("SELECT * FROM normal_parquet").queryExecution.sparkPlan.collect {
286 table("test_parquet_ctas").queryExecution.optimizedPlan match {
310 df.queryExecution.sparkPlan match {
341 df.queryExecution.sparkPlan match {
372 """.stripMargin).queryExecution.analyzed
383 val plan = df.queryExecution.analyzed
805 val queryExecution = df.queryExecution
807 queryExecution.analyzed.collectFirst {
811 s"but got:\n$queryExecution")
814 queryExecution.analyzed.collectFirst {
[all …]
StatisticsSuite.scala
173 val stats = df.queryExecution.analyzed.collect {
482 val sizes = df.queryExecution.analyzed.collect { case mr: MetastoreRelation =>
502 val sizes = df.queryExecution.analyzed.collect {
511 var bhj = df.queryExecution.sparkPlan.collect { case j: BroadcastHashJoinExec => j }
522 bhj = df.queryExecution.sparkPlan.collect { case j: BroadcastHashJoinExec => j }
525 val shj = df.queryExecution.sparkPlan.collect { case j: SortMergeJoinExec => j }
556 val sizes = df.queryExecution.analyzed.collect {
567 var bhj = df.queryExecution.sparkPlan.collect {
580 bhj = df.queryExecution.sparkPlan.collect {
585 val shj = df.queryExecution.sparkPlan.collect {
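
StatisticsSuite verifies join-strategy selection by pattern-matching physical operators out of sparkPlan. A sketch of that collect pattern (the helper name is hypothetical):

    import org.apache.spark.sql.DataFrame
    import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}

    // Count broadcast-hash and sort-merge join operators in the physical plan.
    def joinOperators(df: DataFrame): (Int, Int) = {
      val plan = df.queryExecution.sparkPlan
      val bhj = plan.collect { case j: BroadcastHashJoinExec => j }
      val smj = plan.collect { case j: SortMergeJoinExec => j }
      (bhj.size, smj.size)
    }
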
/dports/devel/spark/spark-2.1.1/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/
HivePlanTest.scala
32 val optimized = sql("SELECT cos(null) AS c FROM t").queryExecution.optimizedPlan
33 val correctAnswer = sql("SELECT cast(null as double) AS c FROM t").queryExecution.optimizedPlan
48 val plan = query.queryExecution.analyzed
HiveResolutionSuite.scala
37 sql("SELECT a[0].A.A from nested").queryExecution.analyzed
46 sql("SELECT a[0].b from nested").queryExecution.analyzed
/dports/devel/spark/spark-2.1.1/sql/core/src/main/scala/org/apache/spark/sql/execution/
SQLExecution.scala
41 queryExecution: QueryExecution)(body: => T): T = {
54 executionId, callSite.shortForm, callSite.longForm, queryExecution.toString,
55 SparkPlanInfo.fromSparkPlan(queryExecution.executedPlan), System.currentTimeMillis()))
/dports/devel/spark/spark-2.1.1/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/
SQLListenerSuite.scala
111 SparkPlanGraph(SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan))
124 df.queryExecution.toString,
125 SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
260 df.queryExecution.toString,
261 SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
290 df.queryExecution.toString,
291 SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
331 df.queryExecution.toString,
332 SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
372 0, "", "", "", SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan), 0)
/dports/devel/spark/spark-2.1.1/sql/core/src/test/scala/org/apache/spark/sql/internal/
SQLConfSuite.scala
232 .queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec])
234 .queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec])
239 .queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec])
241 .queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec])
246 .queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec])
248 .queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec])
/dports/devel/spark/spark-2.1.1/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/
SQLMetricsSuite.scala
62 df.queryExecution.executedPlan)).allNodes.filter { node =>
91 val logical = df1.queryExecution.logical
94 val metrics1 = df1.queryExecution.executedPlan.collectLeaves().head.metrics
100 val metrics2 = df2.queryExecution.executedPlan.collectLeaves().head.metrics
/dports/devel/spark/spark-2.1.1/sql/hive/src/test/scala/org/apache/spark/sql/sources/
BucketedReadSuite.scala
57 val query = table.queryExecution
63 val attrs = table.select("j", "k").queryExecution.analyzed.output
97 val plan = bucketedDataFrame.filter(filterCondition).queryExecution.executedPlan
280 assert(joined.queryExecution.executedPlan.isInstanceOf[SortMergeJoinExec])
281 val joinOperator = joined.queryExecution.executedPlan.asInstanceOf[SortMergeJoinExec]
453 assert(agged.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchange]).isEmpty)
467 assert(agged.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchange]).isEmpty)
/dports/devel/spark/spark-2.1.1/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/
PrimitiveArrayBenchmark.scala
49 len += dsInt.map(e => e).queryExecution.toRdd.collect.length
60 len += dsDouble.map(e => e).queryExecution.toRdd.collect.length
/dports/devel/spark/spark-2.1.1/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/
SparkExecuteStatementOperation.scala
232 logDebug(result.queryExecution.toString())
233 result.queryExecution.logical match {
239 HiveThriftServer2.listener.onStatementParsed(statementId, result.queryExecution.toString())
249 dataTypes = result.queryExecution.analyzed.output.map(_.dataType).toArray
