From 69193dbd64e9e0002abd9a8cd6fe60c1c87bc471 Mon Sep 17 00:00:00 2001
From: Reynold Xin
Date: Fri, 2 Feb 2018 15:00:39 -0800
Subject: [PATCH 1/2] [SQL] Minor doc update: Add an example in DataFrameReader.schema

---
 .../main/scala/org/apache/spark/sql/DataFrameReader.scala | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 46b5f54a33f74..5a1e03fe58ac6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -59,7 +59,7 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
 
   /**
    * Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema
-   * automatically from data. By specifying the schema here, the underlying data source can
+   * automatically from data. By specifyinlg the schema here, the underlying data source can
    * skip the schema inference step, and thus speed up data loading.
    *
    * @since 1.4.0
@@ -74,6 +74,10 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    * infer the input schema automatically from data. By specifying the schema here, the underlying
    * data source can skip the schema inference step, and thus speed up data loading.
    *
+   * {{{
+   *   spark.read.schema("a INT, b STRING, c DOUBLE").csv("test.csv")
+   * }}}
+   *
    * @since 2.3.0
    */
   def schema(schemaString: String): DataFrameReader = {

From e5e5e0b44e22f58736dd27e5c048395670574f18 Mon Sep 17 00:00:00 2001
From: Reynold Xin
Date: Fri, 2 Feb 2018 15:02:26 -0800
Subject: [PATCH 2/2] fix typo

---
 .../src/main/scala/org/apache/spark/sql/DataFrameReader.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 5a1e03fe58ac6..fcaf8d618c168 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -59,7 +59,7 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
 
   /**
    * Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema
-   * automatically from data. By specifyinlg the schema here, the underlying data source can
+   * automatically from data. By specifying the schema here, the underlying data source can
    * skip the schema inference step, and thus speed up data loading.
    *
    * @since 1.4.0
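
For context on the doc change above, here is a minimal, self-contained sketch (not part of the diff) comparing the two `schema` overloads that the Scaladoc describes: the `StructType` overload from 1.4.0 and the DDL-string overload from 2.3.0 that the new `{{{ }}}` example documents. The object name `SchemaExample`, the local-master session, and the `test.csv` path reused from the example are illustrative assumptions.

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{DoubleType, IntegerType, StringType, StructField, StructType}

object SchemaExample {
  def main(args: Array[String]): Unit = {
    // Hypothetical local session; any existing SparkSession works the same way.
    val spark = SparkSession.builder().appName("schema-example").master("local[*]").getOrCreate()

    // StructType overload (since 1.4.0): build the schema programmatically so the
    // CSV source skips its inference pass.
    val structSchema = StructType(Seq(
      StructField("a", IntegerType),
      StructField("b", StringType),
      StructField("c", DoubleType)))
    val df1 = spark.read.schema(structSchema).csv("test.csv")

    // DDL-string overload (since 2.3.0), as shown in the patch: the same schema
    // expressed as "a INT, b STRING, c DOUBLE".
    val df2 = spark.read.schema("a INT, b STRING, c DOUBLE").csv("test.csv")

    // Both readers should report the same explicit schema.
    df1.printSchema()
    df2.printSchema()

    spark.stop()
  }
}
```

Either form avoids the inference scan over the data; the DDL string is simply the more compact spelling for ad-hoc use.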