From 1656580dcff44c66e1874e7c411def4d41c390d6 Mon Sep 17 00:00:00 2001
From: Yuchen Liu
Date: Tue, 11 Jun 2024 12:07:06 -0700
Subject: [PATCH] improve doc

---
 .../spark/sql/execution/streaming/state/StateStore.scala | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
index 8214daaeb3bb3..eb5339e20ef85 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
@@ -379,6 +379,8 @@ trait StateStoreProvider {
     new WrappedReadStateStore(getStore(version))
 
   /**
+   * This is an optional method, used by snapshotBatchId option when reading state as data source.
+   *
    * Return an instance of [[ReadStateStore]] representing state data of the given version.
    * The State Store will be constructed from the batch at startVersion, and applying delta files
    * up to the endVersion. If there is no snapshot file of batch startVersion, an exception will
@@ -389,7 +391,7 @@
    */
   def getReadStore(startVersion: Long, endVersion: Long): ReadStateStore =
     throw new SparkUnsupportedOperationException("getReadStore with startVersion and endVersion " +
-      "is not supported by this StateStoreProvider")
+      s"is not supported by ${this.getClass.toString}")
 
   /** Optional method for providers to allow for background maintenance (e.g. compactions) */
   def doMaintenance(): Unit = { }
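
Note: for context, below is a minimal usage sketch of the read path that this optional method
serves. It assumes the "statestore" data source and the snapshot-related reader options the new
doc comment refers to; the exact option names ("snapshotStartBatchId", "batchId") and the
checkpoint location are illustrative assumptions, not taken from this patch.

import org.apache.spark.sql.SparkSession

object ReadStateFromSnapshotExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("read-state-from-snapshot")
      .master("local[*]")
      .getOrCreate()

    // Read the state as of batch 10, reconstructed from the snapshot written for an earlier
    // batch (5 here) plus the delta files of the batches in between. Internally, such a request
    // can call StateStoreProvider.getReadStore with a start and end version derived from these
    // batch IDs; providers that do not override the method raise the
    // SparkUnsupportedOperationException shown in this patch.
    val stateDf = spark.read
      .format("statestore")
      .option("snapshotStartBatchId", 5L)      // assumed option name
      .option("batchId", 10L)                  // assumed option name
      .load("/tmp/example-query/checkpoint")   // hypothetical checkpoint location

    stateDf.show()
    spark.stop()
  }
}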