diff --git a/python/docs/pyspark.streaming.rst b/python/docs/pyspark.streaming.rst
index 5024d694b668f..f08185627d0bc 100644
--- a/python/docs/pyspark.streaming.rst
+++ b/python/docs/pyspark.streaming.rst
@@ -1,5 +1,5 @@
 pyspark.streaming module
-==================
+========================
 
 Module contents
 ---------------
diff --git a/python/pyspark/mllib/__init__.py b/python/pyspark/mllib/__init__.py
index 5030a655fcbba..c3217620e3c4e 100644
--- a/python/pyspark/mllib/__init__.py
+++ b/python/pyspark/mllib/__init__.py
@@ -32,29 +32,4 @@
 import rand as random
 random.__name__ = 'random'
 random.RandomRDDs.__module__ = __name__ + '.random'
-
-
-class RandomModuleHook(object):
-    """
-    Hook to import pyspark.mllib.random
-    """
-    fullname = __name__ + '.random'
-
-    def find_module(self, name, path=None):
-        # skip all other modules
-        if not name.startswith(self.fullname):
-            return
-        return self
-
-    def load_module(self, name):
-        if name == self.fullname:
-            return random
-
-        cname = name.rsplit('.', 1)[-1]
-        try:
-            return getattr(random, cname)
-        except AttributeError:
-            raise ImportError
-
-
-sys.meta_path.append(RandomModuleHook())
+sys.modules[__name__ + '.random'] = random
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index 741c630cbd6eb..e46af208866a2 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -53,10 +53,10 @@ class Normalizer(VectorTransformer):
     """
     :: Experimental ::
 
-    Normalizes samples individually to unit L\ :sup:`p`\ norm
+    Normalizes samples individually to unit L\ :sup:`p`\ norm
 
-    For any 1 <= `p` <= float('inf'), normalizes samples using
-    sum(abs(vector). :sup:`p`) :sup:`(1/p)` as norm.
+    For any 1 <= `p` < float('inf'), normalizes samples using
+    sum(abs(vector) :sup:`p`) :sup:`(1/p)` as norm.
 
     For `p` = float('inf'), max(abs(vector)) will be used as norm for
     normalization.
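
Note: the __init__.py change above swaps the RandomModuleHook finder on sys.meta_path for a single sys.modules entry. The following is a minimal sketch of that aliasing technique, not part of the patch; the names mypkg and mypkg.random are hypothetical stand-ins for pyspark.mllib and pyspark.mllib.random.

import importlib
import sys
import types

# Hypothetical stand-in modules; in the patch the parent is pyspark.mllib
# and the child is pyspark/mllib/rand.py, imported in __init__.py as `random`.
parent = types.ModuleType('mypkg')
parent.__path__ = []                      # mark the stand-in as a package
rand = types.ModuleType('mypkg.random')

# The technique the patch switches to: publish the module object in
# sys.modules under the dotted name.  Later imports of 'mypkg.random' are
# satisfied straight from the module cache, so no custom finder/loader has
# to be appended to sys.meta_path.
sys.modules['mypkg'] = parent
sys.modules['mypkg.random'] = rand
parent.random = rand                      # mirrors the module-level `random` binding in __init__.py

assert importlib.import_module('mypkg.random') is rand

One behavioral difference worth noting from the removed code: the old hook's load_module also resolved names nested under '.random' via getattr, whereas the one-line replacement only registers the module name itself.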