[SPARK-45870][INFRA] Upgrade action/checkout to v4 #117
Annotations
2 errors
RocksDBStateStoreStreamingAggregationSuite.SPARK-21977: coalesce(1) with aggregation should still be repartitioned when it has non-empty grouping keys - state format version 2 (RocksDBStateStore with changelog checkpointing):
RocksDBStateStoreStreamingAggregationSuite#L1
org.scalatest.exceptions.TestFailedException:
Timed out waiting for stream: The code passed to failAfter did not complete within 120 seconds.
java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
org.scalatest.concurrent.TimeLimits$.failAfterImpl(TimeLimits.scala:277)
org.scalatest.concurrent.TimeLimits.failAfter(TimeLimits.scala:231)
org.scalatest.concurrent.TimeLimits.failAfter$(TimeLimits.scala:230)
org.apache.spark.SparkFunSuite.failAfter(SparkFunSuite.scala:69)
org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$7(StreamTest.scala:481)
org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$7$adapted(StreamTest.scala:480)
scala.collection.mutable.HashMap$Node.foreach(HashMap.scala:642)
scala.collection.mutable.HashMap.foreach(HashMap.scala:504)
org.apache.spark.sql.streaming.StreamTest.fetchStreamAnswer$1(StreamTest.scala:480)
Caused by: null
java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1764)
org.apache.spark.sql.execution.streaming.StreamExecution.awaitOffset(StreamExecution.scala:481)
org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$8(StreamTest.scala:482)
scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
org.scalatest.enablers.Timed$$anon$1.timeoutAfter(Timed.scala:127)
org.scalatest.concurrent.TimeLimits$.failAfterImpl(TimeLimits.scala:282)
org.scalatest.concurrent.TimeLimits.failAfter(TimeLimits.scala:231)
org.scalatest.concurrent.TimeLimits.failAfter$(TimeLimits.scala:230)
org.apache.spark.SparkFunSuite.failAfter(SparkFunSuite.scala:69)
org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$7(StreamTest.scala:481)
== Progress ==
StartStream(ProcessingTimeTrigger(0),org.apache.spark.util.SystemClock@79b0c56d,Map(),/home/runner/work/spark/spark/target/tmp/spark-710d1a48-5161-4cf1-aca4-7829458bd50a)
AddBlockData(org.apache.spark.sql.streaming.StreamingAggregationSuite$BlockRDDBackedSource@770a9469,ArraySeq(List(1)))
=> CheckLastBatch: [0,1]
AssertOnQuery(<condition>, Verify addition of exchange operator)
StopStream
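Note: the Progress list above is the StreamTest DSL trace; the step marked with "=>" is where the 120-second wait expired. For orientation, here is a minimal sketch (not the suite's exact code) of the kind of query this scenario exercises: a coalesce(1)-ed streaming source aggregated on a non-empty grouping key in Complete mode, with the RocksDB state store provider, changelog checkpointing, and state format version 2 implied by the test name. `spark` (an active SparkSession) and `df` (a streaming DataFrame with an integer column `a`) are assumed placeholders.

```scala
// Hedged sketch only; `spark` and the streaming DataFrame `df` are assumed.
import org.apache.spark.sql.functions.{count, expr}
import org.apache.spark.sql.streaming.OutputMode

// Configuration implied by the test name (standard SQLConf keys):
spark.conf.set("spark.sql.streaming.stateStore.providerClass",
  "org.apache.spark.sql.execution.streaming.state.RocksDBStateStoreProvider")
spark.conf.set("spark.sql.streaming.stateStore.rocksdb.changelogCheckpointing.enabled", "true")
spark.conf.set("spark.sql.streaming.aggregation.stateFormatVersion", "2")

// coalesce(1) shows up as `Repartition 1, false` / `Coalesce 1` in the plans below;
// `a % 1` is the non-empty grouping key from the Aggregate node.
val aggregated = df
  .coalesce(1)
  .groupBy(expr("a % 1"))
  .agg(count("*").as("count"))

val query = aggregated.writeStream
  .outputMode(OutputMode.Complete)   // matches "Output Mode: Complete" below
  .format("memory")
  .queryName("agg_by_mod")
  .start()
```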
== Stream ==
Output Mode: Complete
Stream state: {}
Thread state: alive
Thread stack trace: java.base@17.0.9/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.9/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
java.base@17.0.9/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
java.base@17.0.9/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
app//scala.concurrent.impl.Promise$DefaultPromise.tryAwait0(Promise.scala:243)
app//scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:255)
app//scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:104)
app//org.apache.spark.util.ThreadUtils$.awaitReady(ThreadUtils.scala:342)
app//org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:975)
app//org.apache.spark.SparkContext.runJob(SparkContext.scala:2427)
app//org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec.writeWithV2(WriteToDataSourceV2Exec.scala:386)
app//org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec.writeWithV2$(WriteToDataSourceV2Exec.scala:360)
app//org.apache.spark.sql.execution.datasources.v2.WriteToDataSourceV2Exec.writeWithV2(WriteToDataSourceV2Exec.scala:308)
app//org.apache.spark.sql.execution.datasources.v2.WriteToDataSourceV2Exec.run(WriteToDataSourceV2Exec.scala:319)
app//org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result$lzycompute(V2CommandExec.scala:43)
app//org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result(V2CommandExec.scala:43)
app//org.apache.spark.sql.execution.datasources.v2.V2CommandExec.executeCollect(V2CommandExec.scala:49)
app//org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:4411)
app//org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:3643)
app//org.apache.spark.sql.Dataset$$Lambda$2302/0x00007f69552ca3e8.apply(Unknown Source)
app//org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:4401)
app//org.apache.spark.sql.Dataset$$Lambda$2314/0x00007f69552cfa98.apply(Unknown Source)
app//org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
app//org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:4399)
app//org.apache.spark.sql.Dataset$$Lambda$2303/0x00007f69552ca7b8.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2258/0x00007f69552ae1a8.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2244/0x00007f69552ab0d8.apply(Unknown Source)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
app//org.apache.spark.sql.Dataset.withAction(Dataset.scala:4399)
app//org.apache.spark.sql.Dataset.collect(Dataset.scala:3643)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$17(MicroBatchExecution.scala:783)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$2243/0x00007f69552aae18.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2258/0x00007f69552ae1a8.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2244/0x00007f69552ab0d8.apply(Unknown Source)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$16(MicroBatchExecution.scala:771)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$2241/0x00007f69552aa5c8.apply(Unknown Source)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:427)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:425)
app//org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.runBatch(MicroBatchExecution.scala:771)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$2(MicroBatchExecution.scala:326)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$1953/0x00007f69551d2028.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:427)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:425)
app//org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$1(MicroBatchExecution.scala:289)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$1950/0x00007f69551d1458.apply$mcZ$sp(Unknown Source)
app//org.apache.spark.sql.execution.streaming.ProcessingTimeExecutor.execute(TriggerExecutor.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.runActivatedStream(MicroBatchExecution.scala:279)
app//org.apache.spark.sql.execution.streaming.StreamExecution.$anonfun$runStream$1(StreamExecution.scala:311)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$Lambda$1933/0x00007f69551c5738.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
app//org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:289)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.$anonfun$run$1(StreamExecution.scala:211)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1$$Lambda$1928/0x00007f69551c3e28.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:211)
== Sink ==
== Plan ==
== Parsed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, 759fd156-a018-4842-935d-df499c06976d, Complete, 0
+- Aggregate [(a#17377 % 1)], [(a#17377 % 1) AS (a % 1)#17382, count(1) AS count#17381L]
   +- Repartition 1, false
      +- Project [a#17401 AS a#17377]
         +- LogicalRDD [a#17401], true
== Analyzed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, 759fd156-a018-4842-935d-df499c06976d, Complete, 0
+- Aggregate [(a#17377 % 1)], [(a#17377 % 1) AS (a % 1)#17382, count(1) AS count#17381L]
   +- Repartition 1, false
      +- Project [a#17401 AS a#17377]
         +- LogicalRDD [a#17401], true
== Optimized Logical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 0, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@52e041ef]
+- Aggregate [_groupingexpression#17404], [_groupingexpression#17404 AS (a % 1)#17382, count(1) AS count#17381L]
   +- Project [(a#17401 % 1) AS _groupingexpression#17404]
      +- Repartition 1, false
         +- LogicalRDD [a#17401], true
== Physical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 0, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@52e041ef], org.apache.spark.sql.execution.datasources.v2.DataSourceV2Strategy$$Lambda$2187/0x00007f695527a418@1eaf32bb
+- *(5) HashAggregate(keys=[_groupingexpression#17404], functions=[count(1)], output=[(a % 1)#17382, count#17381L])
   +- StateStoreSave [_groupingexpression#17404], state info [ checkpoint = file:/home/runner/work/spark/spark/target/tmp/spark-710d1a48-5161-4cf1-aca4-7829458bd50a/state, runId = b4722077-8774-4993-96eb-18284f1ab353, opId = 0, ver = 0, numPartitions = 5], Complete, 0, 0, 2
      +- *(4) HashAggregate(keys=[_groupingexpression#17404], functions=[merge_count(1)], output=[_groupingexpression#17404, count#17406L])
         +- StateStoreRestore [_groupingexpression#17404], state info [ checkpoint = file:/home/runner/work/spark/spark/target/tmp/spark-710d1a48-5161-4cf1-aca4-7829458bd50a/state, runId = b4722077-8774-4993-96eb-18284f1ab353, opId = 0, ver = 0, numPartitions = 5], 2
            +- *(3) HashAggregate(keys=[_groupingexpression#17404], functions=[merge_count(1)], output=[_groupingexpression#17404, count#17406L])
               +- Exchange hashpartitioning(_groupingexpression#17404, 5), ENSURE_REQUIREMENTS, [plan_id=78926]
                  +- *(2) HashAggregate(keys=[_groupingexpression#17404], functions=[partial_count(1)], output=[_groupingexpression#17404, count#17406L])
                     +- *(2) Project [(a#17401 % 1) AS _groupingexpression#17404]
                        +- Coalesce 1
                           +- *(1) Scan ExistingRDD[a#17401]
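The step after the timed-out CheckLastBatch, AssertOnQuery(<condition>, Verify addition of exchange operator), checks that a shuffle is still planned below the stateful aggregation even though the input was coalesced to one partition; that is the `Exchange hashpartitioning(...)` node visible in the physical plan above. A hedged sketch of such a check against the running query (it uses Spark-internal classes, so it only compiles alongside Spark's own test sources; `query` is the StreamingQuery handle from the earlier sketch):

```scala
// Hedged sketch; relies on Spark-internal classes and the assumed `query` handle.
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
import org.apache.spark.sql.execution.streaming.StreamingQueryWrapper

val streamExec = query.asInstanceOf[StreamingQueryWrapper].streamingQuery
val shuffles = streamExec.lastExecution.executedPlan.collect {
  case e: ShuffleExchangeExec => e
}
assert(shuffles.nonEmpty,
  "stateful aggregation should repartition by the grouping key despite coalesce(1)")
```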
RocksDBStateStoreStreamingAggregationSuite.SPARK-22230: last should change with new batches - state format version 1 (RocksDBStateStore):
RocksDBStateStoreStreamingAggregationSuite#L1
org.scalatest.exceptions.TestFailedException:
Timed out waiting for stream: The code passed to failAfter did not complete within 120 seconds.
java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
org.scalatest.concurrent.TimeLimits$.failAfterImpl(TimeLimits.scala:277)
org.scalatest.concurrent.TimeLimits.failAfter(TimeLimits.scala:231)
org.scalatest.concurrent.TimeLimits.failAfter$(TimeLimits.scala:230)
org.apache.spark.SparkFunSuite.failAfter(SparkFunSuite.scala:69)
org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$7(StreamTest.scala:481)
org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$7$adapted(StreamTest.scala:480)
scala.collection.mutable.HashMap$Node.foreach(HashMap.scala:642)
scala.collection.mutable.HashMap.foreach(HashMap.scala:504)
org.apache.spark.sql.streaming.StreamTest.fetchStreamAnswer$1(StreamTest.scala:480)
Caused by: null
java.base/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1764)
org.apache.spark.sql.execution.streaming.StreamExecution.awaitOffset(StreamExecution.scala:481)
org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$8(StreamTest.scala:482)
scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
org.scalatest.enablers.Timed$$anon$1.timeoutAfter(Timed.scala:127)
org.scalatest.concurrent.TimeLimits$.failAfterImpl(TimeLimits.scala:282)
org.scalatest.concurrent.TimeLimits.failAfter(TimeLimits.scala:231)
org.scalatest.concurrent.TimeLimits.failAfter$(TimeLimits.scala:230)
org.apache.spark.SparkFunSuite.failAfter(SparkFunSuite.scala:69)
org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$7(StreamTest.scala:481)
== Progress ==
AddData to MemoryStream[value#17425]: 1,2,3
=> CheckLastBatch: [3]
AddData to MemoryStream[value#17425]: 4,5,6
CheckLastBatch: [6]
AddData to MemoryStream[value#17425]:
CheckLastBatch: [6]
AddData to MemoryStream[value#17425]: 0
CheckLastBatch: [0]
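As above, a minimal sketch (not the suite's code) of the scenario in this Progress list: a MemoryStream of ints aggregated with last() in Complete mode, so each non-empty batch should change the single output row. `spark` is an assumed active SparkSession, configured with the same RocksDB provider (the test name indicates state format version 1 here).

```scala
// Hedged sketch; `spark` is an active SparkSession (assumed).
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.functions.last
import org.apache.spark.sql.streaming.OutputMode
import spark.implicits._

implicit val sqlCtx: org.apache.spark.sql.SQLContext = spark.sqlContext
val input = MemoryStream[Int]                        // single column named `value`
val lastValue = input.toDF().agg(last("value"))      // Aggregate [last(value, false)]

val query = lastValue.writeStream
  .outputMode(OutputMode.Complete)
  .format("memory")
  .queryName("last_value")
  .start()

input.addData(1, 2, 3); query.processAllAvailable()  // expect [3]
input.addData(4, 5, 6); query.processAllAvailable()  // expect [6]
input.addData();        query.processAllAvailable()  // empty batch, still [6]
input.addData(0);       query.processAllAvailable()  // expect [0]
```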
== Stream ==
Output Mode: Complete
Stream state: {}
Thread state: alive
Thread stack trace: java.base@17.0.9/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.9/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
java.base@17.0.9/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
java.base@17.0.9/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
app//scala.concurrent.impl.Promise$DefaultPromise.tryAwait0(Promise.scala:243)
app//scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:255)
app//scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:104)
app//org.apache.spark.util.ThreadUtils$.awaitReady(ThreadUtils.scala:342)
app//org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:975)
app//org.apache.spark.SparkContext.runJob(SparkContext.scala:2427)
app//org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec.writeWithV2(WriteToDataSourceV2Exec.scala:386)
app//org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec.writeWithV2$(WriteToDataSourceV2Exec.scala:360)
app//org.apache.spark.sql.execution.datasources.v2.WriteToDataSourceV2Exec.writeWithV2(WriteToDataSourceV2Exec.scala:308)
app//org.apache.spark.sql.execution.datasources.v2.WriteToDataSourceV2Exec.run(WriteToDataSourceV2Exec.scala:319)
app//org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result$lzycompute(V2CommandExec.scala:43)
app//org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result(V2CommandExec.scala:43)
app//org.apache.spark.sql.execution.datasources.v2.V2CommandExec.executeCollect(V2CommandExec.scala:49)
app//org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:4411)
app//org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:3643)
app//org.apache.spark.sql.Dataset$$Lambda$2302/0x00007f69552ca3e8.apply(Unknown Source)
app//org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:4401)
app//org.apache.spark.sql.Dataset$$Lambda$2314/0x00007f69552cfa98.apply(Unknown Source)
app//org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:557)
app//org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:4399)
app//org.apache.spark.sql.Dataset$$Lambda$2303/0x00007f69552ca7b8.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2258/0x00007f69552ae1a8.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2244/0x00007f69552ab0d8.apply(Unknown Source)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
app//org.apache.spark.sql.Dataset.withAction(Dataset.scala:4399)
app//org.apache.spark.sql.Dataset.collect(Dataset.scala:3643)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$17(MicroBatchExecution.scala:783)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$2243/0x00007f69552aae18.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$6(SQLExecution.scala:150)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2258/0x00007f69552ae1a8.apply(Unknown Source)
app//org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:241)
app//org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId0$1(SQLExecution.scala:116)
app//org.apache.spark.sql.execution.SQLExecution$$$Lambda$2244/0x00007f69552ab0d8.apply(Unknown Source)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId0(SQLExecution.scala:72)
app//org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:196)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runBatch$16(MicroBatchExecution.scala:771)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$2241/0x00007f69552aa5c8.apply(Unknown Source)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:427)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:425)
app//org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.runBatch(MicroBatchExecution.scala:771)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$2(MicroBatchExecution.scala:326)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$1953/0x00007f69551d2028.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:427)
app//org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:425)
app//org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.$anonfun$runActivatedStream$1(MicroBatchExecution.scala:289)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution$$Lambda$1950/0x00007f69551d1458.apply$mcZ$sp(Unknown Source)
app//org.apache.spark.sql.execution.streaming.ProcessingTimeExecutor.execute(TriggerExecutor.scala:67)
app//org.apache.spark.sql.execution.streaming.MicroBatchExecution.runActivatedStream(MicroBatchExecution.scala:279)
app//org.apache.spark.sql.execution.streaming.StreamExecution.$anonfun$runStream$1(StreamExecution.scala:311)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$Lambda$1933/0x00007f69551c5738.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:907)
app//org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:289)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.$anonfun$run$1(StreamExecution.scala:211)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1$$Lambda$1928/0x00007f69551c3e28.apply$mcV$sp(Unknown Source)
app//scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.scala:18)
app//org.apache.spark.JobArtifactSet$.withActiveJobArtifactState(JobArtifactSet.scala:94)
app//org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:211)
== Sink ==
== Plan ==
== Parsed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, d033d2ca-efd2-4b0c-b201-2fdad4c37e86, Complete, 0
+- Aggregate [last(value#17425, false) AS last(value)#17429]
   +- StreamingDataSourceV2Relation [value#17425], org.apache.spark.sql.execution.streaming.MemoryStreamScanBuilder@1f77dd26, MemoryStream[value#17425], -1, 0
== Analyzed Logical Plan ==
WriteToMicroBatchDataSource MemorySink, d033d2ca-efd2-4b0c-b201-2fdad4c37e86, Complete, 0
+- Aggregate [last(value#17425, false) AS last(value)#17429]
   +- StreamingDataSourceV2Relation [value#17425], org.apache.spark.sql.execution.streaming.MemoryStreamScanBuilder@1f77dd26, MemoryStream[value#17425], -1, 0
== Optimized Logical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 0, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@52541238]
+- Aggregate [last(value#17425, false) AS last(value)#17429]
   +- StreamingDataSourceV2Relation [value#17425], org.apache.spark.sql.execution.streaming.MemoryStreamScanBuilder@1f77dd26, MemoryStream[value#17425], -1, 0
== Physical Plan ==
WriteToDataSourceV2 MicroBatchWrite[epoch: 0, writer: org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@52541238], org.apache.spark.sql.execution.datasources.v2.DataSourceV2Strategy$$Lambda$2187/0x00007f695527a418@25b1c703
+- *(4) HashAggregate(keys=[], functions=[last(value#17425, false)], output=[last(value)#17429])
   +- StateStoreSave state info [ checkpoint = file:/home/runner/work/spark/spark/target/tmp/streaming.metadata-323370ad-9c74-42cb-b87f-acb3ea277c01/state, runId = 31e4adeb-c74d-4773-a7e0-c8dafaa92ca1, opId = 0, ver = 0, numPartitions = 5], Complete, 0, 0, 1
      +- *(3) HashAggregate(keys=[], functions=[merge_last(value#17425, false)], output=[last#17454, valueSet#17455])
         +- StateStoreRestore state info [ checkpoint = file:/home/runner/work/spark/spark/target/tmp/streaming.metadata-323370ad-9c74-42cb-b87f-acb3ea277c01/state, runId = 31e4adeb-c74d-4773-a7e0-c8dafaa92ca1, opId = 0, ver = 0, numPartitions = 5], 1
            +- *(2) HashAggregate(keys=[], functions=[merge_last(value#17425, false)], output=[last#17454, valueSet#17455])
               +- Exchange SinglePartition, ENSURE_REQUIREMENTS, [plan_id=79179]
                  +- *(1) HashAggregate(keys=[], functions=[partial_last(value#17425, false)], output=[last#17454, valueSet#17455])
                     +- *(1) Project [value#17425]
                        +- MicroBatchScan[value#17425] MemoryStreamDataSource
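Both failures show the same symptom: the stream thread is still blocked in DAGScheduler.runJob for the first micro-batch write when the 120-second failAfter expires in StreamExecution.awaitOffset, so nothing is committed (the Sink sections are empty and Stream state is {}). A hedged triage sketch using only public StreamingQuery APIs, with `query` an assumed handle to the stuck query:

```scala
// Hedged sketch; `query` is a running StreamingQuery (assumed).
// A stuck micro-batch typically shows isTriggerActive = true and no new progress events.
println(query.status)                                   // message / isDataAvailable / isTriggerActive
query.recentProgress.foreach(p => println(p.prettyJson)) // empty if no batch has completed
```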