diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index a246fe1c3053f..9864dc98c1f33 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -39,6 +39,7 @@
 from pyspark.sql.streaming import DataStreamWriter
 from pyspark.sql.types import IntegralType
 from pyspark.sql.types import *
+from pyspark.util import _exception_message
 
 __all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
 
@@ -1902,7 +1903,7 @@ def toPandas(self):
             except ImportError as e:
                 msg = "note: pyarrow must be installed and available on calling Python process " \
                       "if using spark.sql.execution.arrow.enabled=true"
-                raise ImportError("%s\n%s" % (e.message, msg))
+                raise ImportError("%s\n%s" % (_exception_message(e), msg))
         else:
             pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
 
diff --git a/python/pyspark/sql/types.py b/python/pyspark/sql/types.py
index f9fe81b2c17b7..40f82832950ef 100644
--- a/python/pyspark/sql/types.py
+++ b/python/pyspark/sql/types.py
@@ -35,6 +35,7 @@
 
 from pyspark import SparkContext
 from pyspark.serializers import CloudPickleSerializer
+from pyspark.util import _exception_message
 
 __all__ = [
     "DataType", "NullType", "StringType", "BinaryType", "BooleanType", "DateType",
@@ -1682,7 +1683,7 @@
 def _old_pandas_exception_message(e):
     """ Create an error message for importing old Pandas. """
     msg = "note: Pandas (>=0.19.2) must be installed and available on calling Python process"
-    return "%s\n%s" % (e.message, msg)
+    return "%s\n%s" % (_exception_message(e), msg)
 
 
 def _check_dataframe_localize_timestamps(pdf, timezone):
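
Context for the change above: Python 2 exceptions expose a `message` attribute, but Python 3 removed it, so accessing `e.message` under Python 3 raises an `AttributeError` that masks the original `ImportError`. The diff swaps the direct attribute access for the `_exception_message` helper imported from `pyspark.util`. A minimal sketch of what such a helper looks like (an illustration of the compatibility pattern, not necessarily the exact pyspark.util implementation):

    def _exception_message(excp):
        """Return the message from an exception in a Python 2/3 compatible way.

        Prefer the Python 2 `message` attribute when it exists; fall back to
        str(excp) on Python 3, where the attribute was removed.
        """
        if hasattr(excp, "message"):
            return excp.message
        return str(excp)

With this helper, the re-raised ImportError carries the original error text plus the installation hint on both Python versions instead of failing with an unrelated AttributeError on Python 3.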