diff --git a/core/pom.xml b/core/pom.xml
index 6e3781f2bb0c6..2a81f6df289c0 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -21,11 +21,7 @@
   <parent>
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent</artifactId>
-<<<<<<< HEAD
     <version>1.2.0-SNAPSHOT</version>
-=======
-    <version>1.1.0-SNAPSHOT</version>
->>>>>>> modified the code base on comment in https://github.com/tdas/spark/pull/10
     <relativePath>../pom.xml</relativePath>
   </parent>
 
diff --git a/python/pyspark/streaming/dstream.py b/python/pyspark/streaming/dstream.py
index 1b095f2a42372..0029178ec4f2b 100644
--- a/python/pyspark/streaming/dstream.py
+++ b/python/pyspark/streaming/dstream.py
@@ -22,11 +22,10 @@
 from pyspark.serializers import NoOpSerializer,\
     BatchedSerializer, CloudPickleSerializer, pack_long,\
     CompressedSerializer
-from pyspark.rdd import _JavaStackTrace
 from pyspark.storagelevel import StorageLevel
 from pyspark.resultiterable import ResultIterable
 from pyspark.streaming.util import rddToFileName, RDDFunction
-
+from pyspark.traceback_utils import SCCallSiteSync
 from py4j.java_collections import ListConverter, MapConverter
 
 
@@ -187,7 +186,7 @@ def add_shuffle_key(split, iterator):
             yield outputSerializer.dumps(items)
         keyed = PipelinedDStream(self, add_shuffle_key)
         keyed._bypass_serializer = True
-        with _JavaStackTrace(self.ctx) as st:
+        with SCCallSiteSync(self.context) as css:
             partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                           id(partitionFunc))
             jdstream = self.ctx._jvm.PythonPairwiseDStream(keyed._jdstream.dstream(),
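For context on the `dstream.py` change: `_JavaStackTrace` and its replacement `SCCallSiteSync` are both context managers that copy the Python call site into the JVM-side SparkContext, so stages in the Spark UI point back at the user code that triggered them. Below is a minimal sketch of that pattern, not the verbatim Spark source; the `_Fake*` classes and the fixed call-site string are stand-ins for the real py4j plumbing and traceback inspection.

```python
class _FakeJvmSparkContext:
    """Hypothetical stand-in for the py4j JavaSparkContext proxy."""
    def setCallSite(self, site):
        print("JVM call site:", site)


class _FakeSparkContext:
    """Stand-in for pyspark's SparkContext; only the _jsc handle is modeled."""
    def __init__(self):
        self._jsc = _FakeJvmSparkContext()


class CallSiteSync:
    """Toy mirror of the SCCallSiteSync pattern: set the call site on the
    outermost entry, clear it when the outermost block exits."""
    _stack_depth = 0  # shared across nested with-blocks

    def __init__(self, sc):
        self._context = sc
        # The real helper derives this from the traceback; a fixed string
        # keeps the sketch self-contained.
        self._call_site = "partitionBy at dstream.py"

    def __enter__(self):
        if CallSiteSync._stack_depth == 0:
            # Only the outermost Spark call sets the site the UI displays.
            self._context._jsc.setCallSite(self._call_site)
        CallSiteSync._stack_depth += 1

    def __exit__(self, exc_type, exc_value, tb):
        CallSiteSync._stack_depth -= 1
        if CallSiteSync._stack_depth == 0:
            self._context._jsc.setCallSite(None)


if __name__ == "__main__":
    sc = _FakeSparkContext()
    with CallSiteSync(sc):        # sets the call site once
        with CallSiteSync(sc):    # nested call: depth counter prevents a reset
            pass
    # leaving the outer block clears the call site again
```

The shared depth counter is what makes nested Spark operations keep reporting the outermost user frame instead of internal helper frames, which is why the diff can wrap `partitionBy`'s JVM calls in a single `with` block.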