diff --git a/python/pyspark/sql/types.py b/python/pyspark/sql/types.py
index 60d5684268eaa..fa98d09a9af9a 100644
--- a/python/pyspark/sql/types.py
+++ b/python/pyspark/sql/types.py
@@ -50,7 +50,7 @@
 from pyspark.sql.utils import (
     has_numpy,
     get_active_spark_context,
-    escapeMetaCharacters,
+    escape_meta_characters,
     StringConcat,
 )
 from pyspark.sql.variant_utils import VariantUtils
@@ -1076,7 +1076,7 @@ def _build_formatted_string(
     ) -> None:
         if maxDepth > 0:
             stringConcat.append(
-                f"{prefix}-- {escapeMetaCharacters(self.name)}: {self.dataType.typeName()} "
+                f"{prefix}-- {escape_meta_characters(self.name)}: {self.dataType.typeName()} "
                 + f"(nullable = {str(self.nullable).lower()})\n"
             )
             DataType._data_type_build_formatted_string(
diff --git a/python/pyspark/sql/utils.py b/python/pyspark/sql/utils.py
index d0167e6bb67bb..171f92e557a12 100644
--- a/python/pyspark/sql/utils.py
+++ b/python/pyspark/sql/utils.py
@@ -164,7 +164,7 @@ def toString(self) -> str:
 
 
 # Python implementation of 'org.apache.spark.util.SparkSchemaUtils.escapeMetaCharacters'
-def escapeMetaCharacters(s: str) -> str:
+def escape_meta_characters(s: str) -> str:
     return (
         s.replace("\n", "\\n")
         .replace("\r", "\\r")
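The diff above is a pure rename to snake_case; the escaping behavior itself is unchanged. A minimal sketch of that behavior follows, assuming only the two replacements visible in the hunk (the real pyspark.sql.utils.escape_meta_characters may escape additional control characters not shown in this diff):

# Minimal sketch of the escaping shown in the hunk above.
# Assumption: only "\n" and "\r" are handled here; the real helper
# in pyspark.sql.utils may cover more characters.
def escape_meta_characters(s: str) -> str:
    return (
        s.replace("\n", "\\n")
        .replace("\r", "\\r")
    )


if __name__ == "__main__":
    # A field name containing a newline stays on one printed line when it
    # is embedded in a formatted schema tree string.
    print(escape_meta_characters("col\nname"))  # -> col\nname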