diff --git a/docker/multicuda_framework_directory.py b/docker/multicuda_framework_directory.py index c080bec0d52f2..3d087b521c9bd 100644 --- a/docker/multicuda_framework_directory.py +++ b/docker/multicuda_framework_directory.py @@ -28,9 +28,10 @@ def install_pkg(path, pkg, base="fw/"): f"pip3 install {pkg} --default-timeout=100 --no-cache-dir", shell=True ) -if __name__=="__main__": - arg_lis=sys.argv - directory_generator(arg_lis[1:],"") + +if __name__ == "__main__": + arg_lis = sys.argv + directory_generator(arg_lis[1:], "") # torch_req = ["torch/1.13.1"] diff --git a/docker/multiversion_framework_directory.py b/docker/multiversion_framework_directory.py index bcd309e88b302..9be5d047c2fa8 100644 --- a/docker/multiversion_framework_directory.py +++ b/docker/multiversion_framework_directory.py @@ -25,38 +25,41 @@ def install_pkg(path, pkg, base="fw/"): ) elif pkg.split("==")[0] == "tensorflow": subprocess.run( - f"pip3 install tensorflow-cpu=={pkg.split('==')[1]} --default-timeout=100 --target={path} --no-cache-dir", shell=True + f"pip3 install tensorflow-cpu=={pkg.split('==')[1]} --default-timeout=100 --target={path} --no-cache-dir", + shell=True, ) else: subprocess.run( - f"pip3 install {pkg} --default-timeout=100 --target={path} --no-cache-dir", shell=True + f"pip3 install {pkg} --default-timeout=100 --target={path} --no-cache-dir", + shell=True, ) -if __name__=="__main__": - arg_lis=sys.argv +if __name__ == "__main__": + arg_lis = sys.argv - if 'backend' in arg_lis: + if "backend" in arg_lis: for i in arg_lis[2:]: - if i.split('/')[0]=='torch': + if i.split("/")[0] == "torch": subprocess.run( f"pip3 install torch=={i.split('/')[1]} --default-timeout=100 --extra-index-url https://download.pytorch.org/whl/cpu --no-cache-dir", shell=True, ) - elif i.split('/')[0]=='tensorflow': + elif i.split("/")[0] == "tensorflow": print(i, "heere") subprocess.run( f"pip3 install tensorflow-cpu=={i.split('/')[1]} --default-timeout=100 --no-cache-dir", - shell=True + shell=True, ) - elif i.split('/')[0]=='jaxlib': + elif i.split("/")[0] == "jaxlib": subprocess.run( f"pip3 install {i} --default-timeout=100 -f https://storage.googleapis.com/jax-releases/jax_releases.html --no-cache-dir", shell=True, ) else: subprocess.run( - f"pip3 install {i.split('/')[0]}=={i.split('/')[1]} --default-timeout=100 --no-cache-dir", shell=True + f"pip3 install {i.split('/')[0]}=={i.split('/')[1]} --default-timeout=100 --no-cache-dir", + shell=True, ) try: import tensorflow @@ -87,6 +90,3 @@ def install_pkg(path, pkg, base="fw/"): ) else: directory_generator(arg_lis[1:]) - - - diff --git a/docs/overview/contributing/setting_up.rst b/docs/overview/contributing/setting_up.rst index ca994aaa066a4..fdd2936ad62ca 100644 --- a/docs/overview/contributing/setting_up.rst +++ b/docs/overview/contributing/setting_up.rst @@ -185,11 +185,6 @@ Windows #. Install `WSL 2 `_. For most, it will only require running the command :code:`wsl --install` in powershell admin mode. Visit the link if it doesn't. -#. Get the latest Docker Image for Ivy by: - - a. Running Docker desktop. - b. Opening cmd, and running the command: :code:`docker pull unifyai/ivy:latest` - #. Install `Pycharm Professional Version `_, make sure to only install the Professional version of PyCharm, not the Community version. #. Open pycharm with your cloned Ivy repository. Add the remote python interpreter by: @@ -197,10 +192,19 @@ Windows a. Going to the settings -> Build, Execution, Deployment -> Docker Click the "+" on top left and it should add a docker connection. b. 
Going to settings -> project -> Python Interpreter - c. Clicking add interpreter (currently by clicking the ⚙ icon by the right side) which should open a new window. - d. Choosing "Docker" from the left panel. Type python3 (with the number) in python interpreter path and press ok. - e. Opening "Edit Run/Debug configurations" dialog -> "Edit Configurations..." and making sure that "Working directory" is empty in case of getting the "Can't run process: the working directory '\ivy' is invalid, it needs to be an absolute path" error. + c. Clicking add interpreter (currently by clicking the ⚙ icon by the right side) which should open a small drop-down menu. Select "On Docker...". A window will open which will have three steps. +#. It will ask you to create a new Docker target; at this step, select the following: + + a. Docker image -> Docker + b. Image -> Pull + c. Image tag -> unifyai/ivy:latest + d. Select "Next" +#. The image will start pulling. This will take a considerable amount of time to complete. Once you see the "Introspection Completed" message, select "Next". +#. Another window will appear; at this step, select the following: + + a. In the left panel select "System Interpreter". + b. For Interpreter, select the default option, which will be "/usr/bin/python3", then select "Create". +#. Opening "Edit Run/Debug configurations" dialog -> "Edit Configurations..." and making sure that "Working directory" is empty in case of getting the "Can't run process: the working directory '\ivy' is invalid, it needs to be an absolute path" error. Once these steps are finished, your interpreter should be set up correctly! If Docker's latest version causes error, try using an earlier version by visiting `Docker release note `_. @@ -316,8 +320,20 @@ Ubuntu d. Choosing "Docker" from the left panel. Type python3 (with the number) in python interpreter path and press ok. -For questions, please reach out on `discord`_ in the `docker channel`_! +**Docker Connection not Successful** +This is a common error you might face. If you cannot connect Docker with PyCharm (point 4a) even though Docker is running, the issue is that you do not have permission to use the Docker socket. Executing the two commands below should solve this. + + .. code-block:: none + + sudo chmod a+rwx /var/run/docker.sock + + .. code-block:: none + + sudo chmod a+rwx /var/run/docker.pid + + +For questions, please reach out on `discord`_ in the `docker channel`_!
**Video** diff --git a/docs/partial_conf.py b/docs/partial_conf.py index 201e6d5fe2059..f1f1ea8abfa5e 100644 --- a/docs/partial_conf.py +++ b/docs/partial_conf.py @@ -47,10 +47,6 @@ # Only generate docs for index.rst # That resolved a bug of autosummary generating docs for code-block examples # of autosummary -autosummary_generate = ['index.rst'] +autosummary_generate = ["index.rst"] -skippable_method_attributes = [ - { - "__qualname__": "_wrap_function..new_function" - } -] +skippable_method_attributes = [{"__qualname__": "_wrap_function..new_function"}] diff --git a/ivy/data_classes/container/base.py b/ivy/data_classes/container/base.py index d7963f14f5cee..8fea7a05aa5eb 100644 --- a/ivy/data_classes/container/base.py +++ b/ivy/data_classes/container/base.py @@ -3149,7 +3149,9 @@ def cont_map( """ return_dict = self if inplace else dict() for key, value in self.items(): - this_key_chain = key if key_chain == "" else (key_chain + "/" + key) + this_key_chain = ( + key if key_chain == "" else (str(key_chain) + "/" + str(key)) + ) if isinstance(value, ivy.Container): ret = value.cont_map( func, diff --git a/ivy/functional/backends/jax/experimental/layers.py b/ivy/functional/backends/jax/experimental/layers.py index 8e2b9b5363c90..ee8186d616e73 100644 --- a/ivy/functional/backends/jax/experimental/layers.py +++ b/ivy/functional/backends/jax/experimental/layers.py @@ -508,8 +508,7 @@ def interpolate( "area", "nearest_exact", "tf_area", - "bicubic_tensorflow" - "bicubic", + "bicubic_tensorflow" "bicubic", "mitchellcubic", "lanczos3", "lanczos5", @@ -526,8 +525,7 @@ def interpolate( mode = ( "nearest" if mode == "nearest-exact" - else "bicubic" - "bicubic" + else "bicubic" "bicubic" if mode == "bicubic_tensorflow" else mode ) diff --git a/ivy/functional/backends/jax/general.py b/ivy/functional/backends/jax/general.py index b5f8857257478..a007e7d201f55 100644 --- a/ivy/functional/backends/jax/general.py +++ b/ivy/functional/backends/jax/general.py @@ -424,4 +424,4 @@ def vmap( func = ivy.output_to_native_arrays(func) return ivy.inputs_to_native_arrays( jax.vmap(func, in_axes=in_axes, out_axes=out_axes) - ) \ No newline at end of file + ) diff --git a/ivy/functional/backends/jax/searching.py b/ivy/functional/backends/jax/searching.py index 08e5bcb82114a..a75bd1d4b0b32 100644 --- a/ivy/functional/backends/jax/searching.py +++ b/ivy/functional/backends/jax/searching.py @@ -96,9 +96,10 @@ def where( # ----- # -def argwhere(x: JaxArray, - /, - *, - out: Optional[JaxArray] = None, +def argwhere( + x: JaxArray, + /, + *, + out: Optional[JaxArray] = None, ) -> JaxArray: return jnp.argwhere(x) diff --git a/ivy/functional/backends/numpy/searching.py b/ivy/functional/backends/numpy/searching.py index 5653f92e42c64..731a82013bbb9 100644 --- a/ivy/functional/backends/numpy/searching.py +++ b/ivy/functional/backends/numpy/searching.py @@ -101,9 +101,10 @@ def where( # ----- # -def argwhere(x: np.ndarray, - /, - *, - out: Optional[np.ndarray] = None, +def argwhere( + x: np.ndarray, + /, + *, + out: Optional[np.ndarray] = None, ) -> np.ndarray: return np.argwhere(x) diff --git a/ivy/functional/backends/paddle/creation.py b/ivy/functional/backends/paddle/creation.py index b69f8e247f68a..c081c5013bf04 100644 --- a/ivy/functional/backends/paddle/creation.py +++ b/ivy/functional/backends/paddle/creation.py @@ -146,7 +146,7 @@ def asarray( if dtype is None: dtype = ivy.default_dtype(item=obj) with ivy.ArrayMode(False): - return ivy.squeeze(paddle.to_tensor(obj,dtype=dtype),0) + return 
ivy.squeeze(paddle.to_tensor(obj, dtype=dtype), 0) else: dtype = ivy.as_native_dtype((ivy.default_dtype(dtype=dtype, item=obj))) diff --git a/ivy/functional/backends/paddle/data_type.py b/ivy/functional/backends/paddle/data_type.py index 45a67205b1a79..6421f9b7be756 100644 --- a/ivy/functional/backends/paddle/data_type.py +++ b/ivy/functional/backends/paddle/data_type.py @@ -174,9 +174,7 @@ def broadcast_to( paddle.uint8, paddle.float16, ]: - return paddle.broadcast_to(x.cast("float32"), shape).cast( - x.dtype - ) + return paddle.broadcast_to(x.cast("float32"), shape).cast(x.dtype) elif x.dtype in [paddle.complex64, paddle.complex128]: x_real = paddle.broadcast_to(x.real(), shape) x_imag = paddle.broadcast_to(x.imag(), shape) diff --git a/ivy/functional/backends/paddle/experimental/elementwise.py b/ivy/functional/backends/paddle/experimental/elementwise.py index 0f78f0b23f827..4a88a6f4063e1 100644 --- a/ivy/functional/backends/paddle/experimental/elementwise.py +++ b/ivy/functional/backends/paddle/experimental/elementwise.py @@ -85,7 +85,7 @@ def float_power( @with_unsupported_device_and_dtypes( {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version -) +) def exp2( x: Union[paddle.Tensor, float, list, tuple], /, @@ -93,7 +93,7 @@ def exp2( out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: with ivy.ArrayMode(False): - return ivy.pow(2,x) + return ivy.pow(2, x) @with_unsupported_device_and_dtypes( @@ -107,9 +107,9 @@ def copysign( out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: with ivy.ArrayMode(False): - x2 = ivy.where(ivy.equal(x2, 0),ivy.divide(1, x2),x2) + x2 = ivy.where(ivy.equal(x2, 0), ivy.divide(1, x2), x2) signs = ivy.sign(x2) - return ivy.multiply(ivy.abs(x1),signs) + return ivy.multiply(ivy.abs(x1), signs) def count_nonzero( @@ -214,24 +214,32 @@ def nan_to_num( neginf: Optional[Union[float, int]] = None, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: - with ivy.ArrayMode(False): + with ivy.ArrayMode(False): if ivy.is_int_dtype(x): - if posinf==None: + if posinf == None: posinf = ivy.iinfo(x).max - if neginf==None: - neginf= ivy.iinfo(x).min + if neginf == None: + neginf = ivy.iinfo(x).min elif ivy.is_float_dtype(x) or ivy.is_complex_dtype(x): - if posinf==None: + if posinf == None: posinf = ivy.finfo(x).max - if neginf==None: - neginf=ivy.finfo(x).min + if neginf == None: + neginf = ivy.finfo(x).min ret = ivy.where(ivy.isnan(x), paddle.to_tensor(nan, dtype=x.dtype), x) - ret = ivy.where(ivy.logical_and(ivy.isinf(ret), ret > 0), paddle.to_tensor(posinf, dtype=x.dtype), ret) - ret = ivy.where(ivy.logical_and(ivy.isinf(ret), ret < 0), paddle.to_tensor(neginf, dtype=x.dtype), ret) + ret = ivy.where( + ivy.logical_and(ivy.isinf(ret), ret > 0), + paddle.to_tensor(posinf, dtype=x.dtype), + ret, + ) + ret = ivy.where( + ivy.logical_and(ivy.isinf(ret), ret < 0), + paddle.to_tensor(neginf, dtype=x.dtype), + ret, + ) if copy: return ret.clone() else: - x= ret + x = ret return x diff --git a/ivy/functional/backends/paddle/manipulation.py b/ivy/functional/backends/paddle/manipulation.py index 8a730d45afdf5..9dc5f23272c77 100644 --- a/ivy/functional/backends/paddle/manipulation.py +++ b/ivy/functional/backends/paddle/manipulation.py @@ -200,9 +200,7 @@ def squeeze( "tried to squeeze a zero-dimensional input by axis {}".format(axis) ) if x.dtype in [paddle.int16, paddle.float16]: - return paddle.squeeze(x.cast("float32"), axis=axis).cast( - x.dtype - ) + return paddle.squeeze(x.cast("float32"), axis=axis).cast(x.dtype) return paddle.squeeze(x, 
axis=axis) diff --git a/ivy/functional/backends/paddle/random.py b/ivy/functional/backends/paddle/random.py index 9227605a6845d..9599f51986a27 100644 --- a/ivy/functional/backends/paddle/random.py +++ b/ivy/functional/backends/paddle/random.py @@ -154,7 +154,5 @@ def shuffle( shuffled_real = paddle.index_select(x.real(), indices) shuffled_imag = paddle.index_select(x.imag(), indices) return shuffled_real + 1j * shuffled_imag - return paddle.index_select(x.cast("float32"), indices).cast( - x.dtype - ) + return paddle.index_select(x.cast("float32"), indices).cast(x.dtype) return paddle.index_select(x, indices) diff --git a/ivy/functional/backends/torch/searching.py b/ivy/functional/backends/torch/searching.py index 37d045153c43b..e50bdcfbe99fb 100644 --- a/ivy/functional/backends/torch/searching.py +++ b/ivy/functional/backends/torch/searching.py @@ -111,9 +111,10 @@ def where( # ----- # -def argwhere(x: torch.Tensor, - /, - *, - out: Optional[torch.Tensor] = None, +def argwhere( + x: torch.Tensor, + /, + *, + out: Optional[torch.Tensor] = None, ) -> torch.Tensor: return torch.argwhere(x) diff --git a/ivy/functional/frontends/jax/_src/__init__.py b/ivy/functional/frontends/jax/_src/__init__.py index 286348a37fc01..f757bcb835275 100644 --- a/ivy/functional/frontends/jax/_src/__init__.py +++ b/ivy/functional/frontends/jax/_src/__init__.py @@ -1,4 +1,4 @@ # flake8: noqa from . import numpy from . import array -from . import api \ No newline at end of file +from . import api diff --git a/ivy/functional/frontends/jax/_src/api.py b/ivy/functional/frontends/jax/_src/api.py index 6aafbcc9ab0b9..4b3db5055b66d 100644 --- a/ivy/functional/frontends/jax/_src/api.py +++ b/ivy/functional/frontends/jax/_src/api.py @@ -1,11 +1,12 @@ import ivy -from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back, outputs_to_native_arrays -def vmap(fun, - in_axes=0, - out_axes=0, - axis_name=None, - axis_size=None, - spmd_axis_name=None - ): +from ivy.functional.frontends.jax.func_wrapper import ( + to_ivy_arrays_and_back, + outputs_to_native_arrays, +) + + +def vmap( + fun, in_axes=0, out_axes=0, axis_name=None, axis_size=None, spmd_axis_name=None +): fun = outputs_to_native_arrays(fun) - return to_ivy_arrays_and_back(ivy.vmap(fun, in_axes=in_axes, out_axes=out_axes)) \ No newline at end of file + return to_ivy_arrays_and_back(ivy.vmap(fun, in_axes=in_axes, out_axes=out_axes)) diff --git a/ivy/functional/frontends/jax/func_wrapper.py b/ivy/functional/frontends/jax/func_wrapper.py index 94ac43c9e46f7..4b4a2d8a0edee 100644 --- a/ivy/functional/frontends/jax/func_wrapper.py +++ b/ivy/functional/frontends/jax/func_wrapper.py @@ -160,6 +160,7 @@ def new_fn(*args, dtype=None, **kwargs): dtype_pos = list(inspect.signature(fn).parameters).index("dtype") return new_fn + def outputs_to_native_arrays(fn: Callable): @functools.wraps(fn) def new_fn(*args, **kwargs): @@ -167,17 +168,22 @@ def new_fn(*args, **kwargs): if isinstance(ret, jax_frontend.DeviceArray): ret = ret.ivy_array.data return ret - #return ivy.to_native(_to_ivy_array(ret)) - return new_fn + # return ivy.to_native(_to_ivy_array(ret)) + + return new_fn def inputs_to_frontend_arrays(fn: Callable): @functools.wraps(fn) def new_fn(*args, **kwargs): args = ivy.nested_map(args, lambda x: jax_frontend.DeviceArray(ivy.array(x))) - kwargs = ivy.nested_map(kwargs, lambda x: jax_frontend.DeviceArray(ivy.array(x))) + kwargs = ivy.nested_map( + kwargs, lambda x: jax_frontend.DeviceArray(ivy.array(x)) + ) return fn(*args, **kwargs) + return new_fn + def 
to_frontend_arrays_and_back(fn: Callable) -> Callable: return outputs_to_native_arrays(inputs_to_frontend_arrays(fn)) diff --git a/ivy/functional/frontends/jax/numpy/manipulations.py b/ivy/functional/frontends/jax/numpy/manipulations.py index a966c9f67b7c1..c94929c7b328b 100644 --- a/ivy/functional/frontends/jax/numpy/manipulations.py +++ b/ivy/functional/frontends/jax/numpy/manipulations.py @@ -257,3 +257,12 @@ def hamming(M): n = ivy.arange(M) ret = 0.54 - 0.46 * ivy.cos(2.0 * ivy.pi * n / (M - 1)) return ret + + +@to_ivy_arrays_and_back +def hanning(M): + if M <= 1: + return ivy.ones([M], dtype=ivy.float64) + n = ivy.arange(M) + ret = 0.5 * (1 - ivy.cos(2.0 * ivy.pi * n / (M - 1))) + return ret diff --git a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py index 0924f1fdef09b..ebb3363e64039 100644 --- a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py +++ b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py @@ -47,3 +47,10 @@ def indices(dimensions, dtype=int, sparse=False): else: res[i] = idx return res + + +# unravel_index +@to_ivy_arrays_and_back +def unravel_index(indices, shape, order='C'): + ret = [x.astype("int64") for x in ivy.unravel_index(indices, shape)] + return tuple(ret) diff --git a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py index 4686516deef30..74476c1f41127 100644 --- a/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py +++ b/ivy/functional/frontends/numpy/mathematical_functions/other_special_functions.py @@ -10,16 +10,15 @@ ) - @to_ivy_arrays_and_back @from_zero_dim_arrays_to_scalar -def unwrap(p, discont=None, axis=-1, *, period=2*ivy.pi): +def unwrap(p, discont=None, axis=-1, *, period=2 * ivy.pi): p = ivy.asarray(p) nd = p.ndim dd = ivy.diff(p, axis=axis) if discont is None: - discont = period/2 - slice1 = [slice(None, None)]*nd # full slices + discont = period / 2 + slice1 = [slice(None, None)] * nd # full slices slice1[axis] = ivy.slice(1, None) slice1 = ivy.tuple(slice1) dtype = ivy.result_type(dd, period) @@ -32,8 +31,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*ivy.pi): interval_low = -interval_high ddmod = ivy.mod(dd - interval_low, period) + interval_low if boundary_ambiguous: - ivy.copyto(ddmod, interval_high, - where=(ddmod == interval_low) & (dd > 0)) + ivy.copyto(ddmod, interval_high, where=(ddmod == interval_low) & (dd > 0)) ph_correct = ddmod - dd ivy.copyto(ph_correct, 0, where=ivy.abs(dd) < discont) up = ivy.array(p, copy=True, dtype=dtype) diff --git a/ivy/functional/frontends/tensorflow/general_functions.py b/ivy/functional/frontends/tensorflow/general_functions.py index cf144b1596380..91566d64c02de 100644 --- a/ivy/functional/frontends/tensorflow/general_functions.py +++ b/ivy/functional/frontends/tensorflow/general_functions.py @@ -281,8 +281,7 @@ def strided_slice( ) ) begin, end, strides = map( - lambda x: ivy.array(x) if isinstance(x, int) else x, - [begin, end, strides] + lambda x: ivy.array(x) if isinstance(x, int) else x, [begin, end, strides] ) num_defined = len(begin) strides = ivy.repeat(ivy.array(1), num_defined) if strides is None else strides @@ -292,7 +291,7 @@ def strided_slice( ) begin, end, strides = map( lambda x: [ivy.to_scalar(i) for i in x] if ivy.is_ivy_array(x) else x, - [begin, end, strides] + [begin, end, 
strides], ) for i, v in enumerate(shrink_axis_mask): if v == 1: @@ -320,12 +319,21 @@ def strided_slice( end = end + [None] * num_missing strides = strides + [1] * num_missing else: - begin = begin[:ellipsis_index] + [None] * (num_missing + 1) + \ - begin[ellipsis_index + 1:] - end = end[:ellipsis_index] + [None] * (num_missing + 1) + \ - end[ellipsis_index + 1:] - strides = strides[:ellipsis_index] + [1] * (num_missing + 1) + \ - strides[ellipsis_index + 1:] + begin = ( + begin[:ellipsis_index] + + [None] * (num_missing + 1) + + begin[ellipsis_index + 1 :] + ) + end = ( + end[:ellipsis_index] + + [None] * (num_missing + 1) + + end[ellipsis_index + 1 :] + ) + strides = ( + strides[:ellipsis_index] + + [1] * (num_missing + 1) + + strides[ellipsis_index + 1 :] + ) full_slice = () for i, _ in enumerate(begin): if new_axis_mask[i]: @@ -345,8 +353,11 @@ def strided_slice( if all(i is None for i in full_slice): full_slice += (...,) ret = input_[full_slice] - shrink_indices = [i for i, v in enumerate(shrink_axis_mask) - if v and i < len(ret.shape) and ret.shape[i] == 1] + shrink_indices = [ + i + for i, v in enumerate(shrink_axis_mask) + if v and i < len(ret.shape) and ret.shape[i] == 1 + ] ret = ivy.squeeze(ret, axis=shrink_indices) return ret diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py index 1beda8f79c274..eafe6383ad97f 100644 --- a/ivy/functional/frontends/torch/tensor.py +++ b/ivy/functional/frontends/torch/tensor.py @@ -757,7 +757,7 @@ def bitwise_and_(self, other): def atan2_(self, other): self._ivy_array = self.atan2(other).ivy_array return self - + def fmin(self, other, out=None): return torch_frontend.fmin(self._ivy_array, other, out=out) diff --git a/ivy/functional/ivy/__init__.py b/ivy/functional/ivy/__init__.py index 0117c7862606a..c6713e426b7f3 100644 --- a/ivy/functional/ivy/__init__.py +++ b/ivy/functional/ivy/__init__.py @@ -51,6 +51,7 @@ if not ( name.startswith("_") or name == "ivy" + or (callable(thing) and "ivy" not in thing.__module__) or (isinstance(thing, types.ModuleType) and "ivy" not in thing.__name__) ) ] diff --git a/ivy/functional/ivy/experimental/__init__.py b/ivy/functional/ivy/experimental/__init__.py index 9fbe20fc0e151..3ea3f5c35b3f3 100644 --- a/ivy/functional/ivy/experimental/__init__.py +++ b/ivy/functional/ivy/experimental/__init__.py @@ -29,6 +29,7 @@ if not ( name.startswith("_") or name == "ivy" + or (callable(thing) and "ivy" not in thing.__module__) or (isinstance(thing, types.ModuleType) and "ivy" not in thing.__name__) ) ] diff --git a/ivy/functional/ivy/experimental/layers.py b/ivy/functional/ivy/experimental/layers.py index 1d8d2b294cdf6..cad35bec053de 100644 --- a/ivy/functional/ivy/experimental/layers.py +++ b/ivy/functional/ivy/experimental/layers.py @@ -1,7 +1,9 @@ # global import math import itertools +import functools from typing import Optional, Union, Tuple, Literal, Sequence +from functools import reduce # local import ivy @@ -1125,9 +1127,9 @@ def _dim_scale_factor(input_size, output_size, align_corners, scales): def _mitchellcubic_kernel(x): absx = abs(x) if absx < 1: - return (7 * absx ** 3 - 12 * absx ** 2 + 6) / 6 + return (7 * absx**3 - 12 * absx**2 + 6) / 6 elif absx < 2: - return (-absx ** 3 + 6 * absx ** 2 - 11 * absx + 6) / 6 + return (-(absx**3) + 6 * absx**2 - 11 * absx + 6) / 6 else: return 0 @@ -1173,6 +1175,7 @@ def _compute_weight_mat( 0, ) + def _upsample_cubic_convolution1(x, A): return ((A + 2) * x - (A + 3)) * x * x + 1 @@ -1195,9 +1198,11 @@ def 
_upsample_cubic_interp1d(coeffs, ts): coeffs2 = _upsample_get_cubic_coefficients(ts) return _sum_tensors(c1 * c2 for (c1, c2) in zip(coeffs, coeffs2)) + def _sum_tensors(ts): return reduce(ivy.add, ts) + def _upsample_bicubic2d_default( a, output_size, @@ -1271,8 +1276,7 @@ def interpolate( "area", "nearest_exact", "tf_area", - "bicubic_tensorflow" - "bicubic", + "bicubic_tensorflow" "bicubic", "mitchellcubic", "lanczos3", "lanczos5", @@ -1466,26 +1470,36 @@ def interpolate( top = int(math.floor(p_i - 2)) bottom = int(math.ceil(p_i + 2)) kernel_w = ivy.array( - [_mitchellcubic_kernel((p_j - j) / scale_factor_w) - for i in range(left, right)]) + [ + _mitchellcubic_kernel((p_j - j) / scale_factor_w) + for i in range(left, right) + ] + ) kernel_h = ivy.array( - [_mitchellcubic_kernel((p_i - i) / scale_factor_h) - for j in range(top, bottom)]) + [ + _mitchellcubic_kernel((p_i - i) / scale_factor_h) + for j in range(top, bottom) + ] + ) left_pad = max(0, -left) right_pad = max(0, right - in_width) top_pad = max(0, -top) bottom_pad = max(0, bottom - in_height) - pad_width = [(0, 0), (0, 0)] * (len(x.shape) - 3) + \ - [(top_pad, bottom_pad), (left_pad, right_pad)] - padded_x = ivy.pad(x, pad_width, mode='edge') + pad_width = [(0, 0), (0, 0)] * (len(x.shape) - 3) + [ + (top_pad, bottom_pad), + (left_pad, right_pad), + ] + padded_x = ivy.pad(x, pad_width, mode="edge") for b in range(batch): for c in range(channels): patch = padded_x[ - b, c, - top + top_pad:bottom + top_pad, - left + left_pad:right + left_pad] + b, + c, + top + top_pad : bottom + top_pad, + left + left_pad : right + left_pad, + ] ret[b, c, i, j] = ivy.sum( - kernel_h[:, ivy.newaxis] * patch * kernel_w[ivy.newaxis,:] + kernel_h[:, ivy.newaxis] * patch * kernel_w[ivy.newaxis, :] ) elif mode == "gaussian": ratio_h = size[0] / x.shape[-2] @@ -1501,8 +1515,9 @@ def interpolate( kernel_w /= ivy.sum(kernel_w) pad_width = [(0, 0), (0, 0)] * (len(x.shape) - 3) + [ (int(math.ceil(3 * sigma)), int(math.ceil(3 * sigma))), - (int(math.ceil(3 * sigma)), int(math.ceil(3 * sigma)))] - padded_x = ivy.pad(x, pad_width, mode='constant') + (int(math.ceil(3 * sigma)), int(math.ceil(3 * sigma))), + ] + padded_x = ivy.pad(x, pad_width, mode="constant") output_shape = x.shape[:2] + size ret = ivy.zeros(output_shape, dtype=x.dtype) for i in range(size[0]): @@ -1512,9 +1527,11 @@ def interpolate( for b in range(x.shape[0]): for c in range(x.shape[1]): patch = padded_x[ - b, c, - p_i - kernel_size // 2: p_i + kernel_size // 2 + 1, - p_j - kernel_size // 2: p_j + kernel_size // 2 + 1] + b, + c, + p_i - kernel_size // 2 : p_i + kernel_size // 2 + 1, + p_j - kernel_size // 2 : p_j + kernel_size // 2 + 1, + ] ret[b, c, i, j] = ivy.sum( kernel_h[ivy.newaxis, :] * patch * kernel_w[:, ivy.newaxis] ) diff --git a/ivy_tests/conftest.py b/ivy_tests/conftest.py index 57feddfd2b0ca..c331d56096cba 100644 --- a/ivy_tests/conftest.py +++ b/ivy_tests/conftest.py @@ -10,7 +10,6 @@ from hypothesis.extra.redis import RedisExampleDatabase - hypothesis_cache = os.getcwd() + "/.hypothesis/examples/" redis_connect_dev = None redis_connect_master = None @@ -115,6 +114,3 @@ def pytest_configure(config): print_blob=True, ) settings.load_profile("ivy_profile") - - - diff --git a/ivy_tests/test_ivy/helpers/globals.py b/ivy_tests/test_ivy/helpers/globals.py index 309b79bfdc160..36a80a8b02537 100644 --- a/ivy_tests/test_ivy/helpers/globals.py +++ b/ivy_tests/test_ivy/helpers/globals.py @@ -31,7 +31,7 @@ if "paddle" in available_frameworks: FWS_DICT["paddle"] = lambda x=None: 
_get_ivy_paddle(x) - + # This is used to make sure the variable is not being overriden _Notsetval = object() diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_lax_operators.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_lax_operators.py index 78d925d8aba36..10e2c29fa7b36 100644 --- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_lax_operators.py +++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_lax_operators.py @@ -2351,12 +2351,16 @@ def _squeeze_helper(draw): fn_tree="jax.lax.squeeze", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), - shape=st.shared(helpers.get_shape( - allow_none=False, - min_num_dims=1, - max_num_dims=10, - min_dim_size=1, - max_dim_size=5), key="value_shape") + shape=st.shared( + helpers.get_shape( + allow_none=False, + min_num_dims=1, + max_num_dims=10, + min_dim_size=1, + max_dim_size=5, + ), + key="value_shape", + ), ), dim=_squeeze_helper(), ) diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_creation.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_creation.py index 1c91909e2ff6e..788f88eb464b3 100644 --- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_creation.py +++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_creation.py @@ -695,8 +695,7 @@ def test_jax_numpy_copy( # single @handle_frontend_test( fn_tree="jax.numpy.single", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float")), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), ) def test_jax_numpy_single( dtype_and_x, @@ -712,5 +711,5 @@ def test_jax_numpy_single( test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, - x=x[0] + x=x[0], ) diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py index 9817ca2a0589e..22fcc67982707 100644 --- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py +++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py @@ -202,7 +202,6 @@ def test_jax_numpy_tril_indices( ) - # triu_indices @handle_frontend_test( fn_tree="jax.numpy.triu_indices", @@ -230,6 +229,7 @@ def test_jax_numpy_triu_indices( on_device=on_device, ) + # triu_indices_from @handle_frontend_test( fn_tree="jax.numpy.triu_indices_from", @@ -242,7 +242,6 @@ def test_jax_numpy_triu_indices( k=helpers.ints(min_value=-5, max_value=5), test_with_out=st.just(False), ) - def test_jax_numpy_triu_indices_from( dtype_and_x, k, @@ -261,7 +260,8 @@ def test_jax_numpy_triu_indices_from( arr=x[0], k=k, ) - + + # tril_indices_from @handle_frontend_test( fn_tree="jax.numpy.tril_indices_from", @@ -274,7 +274,6 @@ def test_jax_numpy_triu_indices_from( k=helpers.ints(min_value=-5, max_value=5), test_with_out=st.just(False), ) - def test_jax_numpy_tril_indices_from( dtype_and_x, k, @@ -314,6 +313,8 @@ def max_value_as_shape_prod(draw): ) ) return dtype_and_x, shape + + @handle_frontend_test( fn_tree="jax.numpy.unravel_index", dtype_x_shape=max_value_as_shape_prod(), diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_manipulation.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_manipulation.py index 7f1daa12df131..cb98717aa517c 100644 --- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_manipulation.py +++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_manipulation.py @@ -1455,7 +1455,7 @@ def test_jax_numpy_row_stack( # hamming 
@handle_frontend_test( fn_tree="jax.numpy.hamming", - m=helpers.ints(min_value=0,max_value=20), + m=helpers.ints(min_value=0, max_value=20), ) def test_jax_numpy_hamming( m, @@ -1472,3 +1472,25 @@ def test_jax_numpy_hamming( on_device=on_device, M=m, ) + + +# hanning +@handle_frontend_test( + fn_tree="jax.numpy.hanning", + m=helpers.ints(min_value=0, max_value=20), +) +def test_jax_numpy_hanning( + m, + frontend, + test_flags, + fn_tree, + on_device, +): + helpers.test_frontend_function( + input_dtypes=["int64"], + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + M=m, + ) diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_math.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_math.py index fbdea848132ea..6165031e7cfae 100644 --- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_math.py @@ -11,7 +11,9 @@ _get_second_matrix_and_dtype, _get_dtype_value1_value2_axis_for_tensordot, ) -from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_elementwise import ldexp_args +from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_elementwise import ( + ldexp_args, +) # absolute diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_building_matrices.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_building_matrices.py index 8ae6fc27b55f8..4013053c27ddd 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_building_matrices.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_building_matrices.py @@ -148,14 +148,7 @@ def test_numpy_diag( test_with_out=st.just(False), ) def test_numpy_vander( - *, - fn_tree, - dtype_and_x, - N, - increasing, - test_flags, - frontend, - on_device + *, fn_tree, dtype_and_x, N, increasing, test_flags, frontend, on_device ): input_dtype, x = dtype_and_x helpers.test_frontend_function( @@ -168,4 +161,3 @@ def test_numpy_vander( N=N, increasing=increasing, ) - diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_fft/test_discrete_fourier_transform.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_fft/test_discrete_fourier_transform.py index dfd518bcf3860..c4eafe94405aa 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_fft/test_discrete_fourier_transform.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_fft/test_discrete_fourier_transform.py @@ -5,17 +5,10 @@ @handle_frontend_test( fn_tree="numpy.fft.ifft", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - shape=(4,), array_api_dtypes=True - ) -) -def test_numpy_iftt( - dtype_and_x, - frontend, - test_flags, - fn_tree, - on_device -): + available_dtypes=helpers.get_dtypes("float"), shape=(4,), array_api_dtypes=True + ), +) +def test_numpy_iftt(dtype_and_x, frontend, test_flags, fn_tree, on_device): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, @@ -27,5 +20,5 @@ def test_numpy_iftt( a=x, n=None, axis=-1, - norm=None + norm=None, ) diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_indexing_like_operations.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_indexing_like_operations.py index 836c1fe263664..b243cc878c939 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_indexing_like_operations.py +++ 
b/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_indexing_like_operations.py @@ -1,5 +1,6 @@ # Testing Function # global +import numpy as np from hypothesis import strategies as st # local @@ -190,3 +191,50 @@ def test_indices( dtype=dtype[0], sparse=sparse, ) + + +# unravel_index +@st.composite +def max_value_as_shape_prod(draw): + shape = draw( + helpers.get_shape( + min_num_dims=1, + max_num_dims=5, + min_dim_size=1, + max_dim_size=5, + ) + ) + dtype_and_x = draw( + helpers.dtype_values_axis( + available_dtypes=helpers.get_dtypes("valid"), + min_value=0, + max_value=np.prod(shape) - 1, + ) + ) + return dtype_and_x, shape + + +@handle_frontend_test( + fn_tree="numpy.unravel_index", + dtype_x_shape=max_value_as_shape_prod(), + test_with_out=st.just(False), +) +def test_numpy_unravel_index( + *, + dtype_x_shape, + test_flags, + frontend, + fn_tree, + on_device, +): + dtype_and_x, shape = dtype_x_shape + input_dtype, x = dtype_and_x[0], dtype_and_x[1] + helpers.test_frontend_function( + input_dtypes=input_dtype, + test_flags=test_flags, + frontend=frontend, + fn_tree=fn_tree, + on_device=on_device, + indices=x[0], + shape=shape, + ) diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_linear_algebra/test_matrix_eigenvalues.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_linear_algebra/test_matrix_eigenvalues.py index 0fe9086bccb5f..9f4d86e9276a8 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_linear_algebra/test_matrix_eigenvalues.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_linear_algebra/test_matrix_eigenvalues.py @@ -51,18 +51,18 @@ def test_numpy_eigvalsh( shape=helpers.ints(min_value=2, max_value=5).map(lambda x: tuple([x, x])), ).filter( lambda x: "float16" not in x[0] - and "bfloat16" not in x[0] - and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon - and np.linalg.det(np.asarray(x[1][0])) != 0 + and "bfloat16" not in x[0] + and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon + and np.linalg.det(np.asarray(x[1][0])) != 0 ), test_with_out=st.just(False), ) def test_numpy_eig( - dtype_and_x, - on_device, - fn_tree, - frontend, - test_flags, + dtype_and_x, + on_device, + fn_tree, + frontend, + test_flags, ): dtype, x = dtype_and_x x = np.array(x[0], dtype=dtype[0]) @@ -105,9 +105,9 @@ def test_numpy_eig( shape=helpers.ints(min_value=2, max_value=5).map(lambda x: tuple([x, x])), ).filter( lambda x: "float16" not in x[0] - and "bfloat16" not in x[0] - and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon - and np.linalg.det(np.asarray(x[1][0])) != 0 + and "bfloat16" not in x[0] + and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon + and np.linalg.det(np.asarray(x[1][0])) != 0 ), UPLO=st.sampled_from(("L", "U")), test_with_out=st.just(False), diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_adding_and_removing_elements.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_adding_and_removing_elements.py index dbc41e2ad13d5..415d38d393558 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_adding_and_removing_elements.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_adding_and_removing_elements.py @@ -77,20 +77,18 @@ def test_numpy_append( @handle_frontend_test( fn_tree="numpy.trim_zeros", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), - min_num_dims=1, - max_num_dims=1 + available_dtypes=helpers.get_dtypes("float"), 
min_num_dims=1, max_num_dims=1 ), - trim=st.sampled_from(['f', 'b', 'fb']) + trim=st.sampled_from(["f", "b", "fb"]), ) def test_numpy_trim_zeros( - frontend, - on_device, - *, - dtype_and_x, - trim, - fn_tree, - test_flags, + frontend, + on_device, + *, + dtype_and_x, + trim, + fn_tree, + test_flags, ): input_dtypes, x = dtype_and_x helpers.test_frontend_function( @@ -100,5 +98,5 @@ def test_numpy_trim_zeros( fn_tree=fn_tree, on_device=on_device, filt=x[0], - trim=trim + trim=trim, ) diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_basic_operations.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_basic_operations.py index a7722256a4e0f..e2373155d4fbd 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_basic_operations.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_basic_operations.py @@ -12,16 +12,18 @@ @st.composite def generate_copyto_args(draw): - input_dtypes, xs, casting, _ = draw(np_frontend_helpers.dtypes_values_casting_dtype( - arr_func=[ - lambda: helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - num_arrays=2, - shared_dtype=True, - min_num_dims=1, - ) - ], - )) + input_dtypes, xs, casting, _ = draw( + np_frontend_helpers.dtypes_values_casting_dtype( + arr_func=[ + lambda: helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + num_arrays=2, + shared_dtype=True, + min_num_dims=1, + ) + ], + ) + ) where = draw(np_frontend_helpers.where(shape=xs[0].shape)) return input_dtypes, xs, casting, where @@ -30,7 +32,7 @@ def generate_copyto_args(draw): @handle_frontend_test( fn_tree="numpy.copyto", test_with_out=st.just(False), - copyto_args=generate_copyto_args() + copyto_args=generate_copyto_args(), ) def test_numpy_copyto( copyto_args, @@ -38,7 +40,7 @@ def test_numpy_copyto( _, xs, casting, where = copyto_args if isinstance(where, list) or isinstance(where, tuple): where = where[0] - + src_ivy = ivy_np.array(xs[0]) dst_ivy = ivy_np.array(xs[1]) ivy_np.copyto(dst_ivy, src_ivy, where=where, casting=casting) diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_exponents_and_logarithms.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_exponents_and_logarithms.py index ad1e546248ce2..0e596c2fb39c4 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_exponents_and_logarithms.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_exponents_and_logarithms.py @@ -5,7 +5,9 @@ import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers from ivy_tests.test_ivy.helpers import handle_frontend_test -from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_elementwise import ldexp_args +from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_elementwise import ( + ldexp_args, +) # exp @handle_frontend_test( @@ -470,6 +472,7 @@ def test_numpy_i0( x=x[0], ) + # ldexp @handle_frontend_test( fn_tree="numpy.ldexp", diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_miscellaneous.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_miscellaneous.py index 52722cdfe9503..3ec2f4e739856 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_miscellaneous.py +++ 
b/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_miscellaneous.py @@ -229,7 +229,7 @@ def test_numpy_reciprocal( order="K", dtype=dtype, subok=True, - ) + ) # square diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_rounding.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_rounding.py index 755b994262738..369647f86dd57 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_rounding.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_rounding.py @@ -88,7 +88,7 @@ def test_numpy_fix( get_dtypes_kind="float", ), where=np_frontend_helpers.where(), - number_positional_args = np_frontend_helpers.get_num_positional_args_ufunc( + number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="floor" ), ) @@ -121,7 +121,7 @@ def test_numpy_floor( subok=True, ) - + # trunc @handle_frontend_test( fn_tree="numpy.trunc", diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py index 47d199253ada1..22a0397548803 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py @@ -571,7 +571,8 @@ def test_numpy_instance_min( on_device=on_device, ) -#prod + +# prod @handle_frontend_method( class_tree=CLASS_TREE, init_tree="numpy.array", @@ -594,12 +595,16 @@ def test_numpy_ndarray_prod( if ivy.current_backend_str() == "torch": assume(not method_flags.as_variable[0]) - where, input_dtypes, method_flags = np_frontend_helpers.handle_where_and_array_bools( + ( + where, + input_dtypes, + method_flags, + ) = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=method_flags, ) - where = ivy.array(where, dtype='bool') + where = ivy.array(where, dtype="bool") helpers.test_frontend_method( init_input_dtypes=input_dtypes, init_all_as_kwargs_np={ @@ -608,7 +613,7 @@ def test_numpy_ndarray_prod( method_input_dtypes=input_dtypes, method_all_as_kwargs_np={ "axis": axis, - "dtype":dtype, + "dtype": dtype, "keepdims": keep_dims, "initial": initial, "where": where, @@ -1179,7 +1184,7 @@ def test_numpy_instance_std( on_device=on_device, ) - + # fill @handle_frontend_method( class_tree=CLASS_TREE, @@ -1215,7 +1220,7 @@ def test_numpy_ndarray_fill( on_device=on_device, ) - + @handle_frontend_method( class_tree=CLASS_TREE, init_tree="numpy.array", @@ -2679,7 +2684,7 @@ def test_numpy_instance_view( frontend_method_data=frontend_method_data, on_device=on_device, ) - + # mod @handle_frontend_method( diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py index 44b834f75b922..fb21c870aebfb 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_random/test_functions.py @@ -333,12 +333,12 @@ def test_numpy_shuffle( test_with_out=st.just(False), ) def test_numpy_standard_normal( - input_dtypes, - size, - frontend, - test_flags, - fn_tree, - on_device, + input_dtypes, + size, + frontend, + test_flags, + fn_tree, + on_device, ): helpers.test_frontend_function( input_dtypes=input_dtypes, diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_general_functions.py 
b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_general_functions.py index bb2aeddba5785..502520ed57d5c 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_general_functions.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_general_functions.py @@ -1043,9 +1043,7 @@ def _strided_slice_helper(draw): masks = draw( st.lists( st.integers(min_value=0, max_value=2**ndims - 1), min_size=5, max_size=5 - ).filter( - lambda x: bin(x[2])[2:].count("1") <= min(len(shape)-1, 1) - ) + ).filter(lambda x: bin(x[2])[2:].count("1") <= min(len(shape) - 1, 1)) ) begin, end, strides = [], [], [] for i in shape: @@ -1060,8 +1058,8 @@ def _strided_slice_helper(draw): if v == 1: skip = draw(st.integers(min_value=0, max_value=ndims)) begin, end, strides = map( - lambda x: x[:i] + x[i+skip:] if i+skip < ndims else x[:i], - [begin, end, strides] + lambda x: x[:i] + x[i + skip :] if i + skip < ndims else x[:i], + [begin, end, strides], ) break return dtype, x, np.array(begin), np.array(end), np.array(strides), masks @@ -1624,7 +1622,7 @@ def test_tensorflow_unstack( ) -# reverse +# reverse @st.composite def reverse_helper(draw): dtype, x, shape = draw( @@ -1670,4 +1668,4 @@ def test_tensorflow_reverse( on_device=on_device, tensor=x[0], axis=axis[0], - ) + ) diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py index d8462c05b05ac..59a79be0ee399 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py @@ -1665,8 +1665,8 @@ def test_tensorflow_acos( on_device=on_device, x=x[0], ) - - + + # acosh @handle_frontend_test( fn_tree="tensorflow.math.acosh", @@ -1692,7 +1692,7 @@ def test_tensorflow_acosh( on_device=on_device, x=x[0], ) - + # square @handle_frontend_test( @@ -1964,4 +1964,4 @@ def test_tensorflow_sinh( fn_tree=fn_tree, on_device=on_device, x=x[0], - ) \ No newline at end of file + ) diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_nn.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_nn.py index 1f20c2c2593de..9a7e8aaa022e3 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_nn.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_nn.py @@ -247,8 +247,10 @@ def _x_and_filters( ) if type == "separable": p_filter_shape = ( - 1, 1, filter_shape[-1] * filter_shape[-2], - draw(helpers.ints(min_value=1, max_value=3)) + 1, + 1, + filter_shape[-1] * filter_shape[-2], + draw(helpers.ints(min_value=1, max_value=3)), ) p_filters = draw( helpers.array_values( diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py index ae81f47270ee9..409e88de11fe1 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py @@ -3514,9 +3514,9 @@ def test_tensorflow_BandedTriangularSolve( test_flags, fn_tree, on_device, - matrix, - rhs, - lower, + matrix, + rhs, + lower, adjoint, ): input_dtype, x = dtype_and_x @@ -3526,11 +3526,11 @@ def test_tensorflow_BandedTriangularSolve( frontend=frontend, fn_tree=fn_tree, on_device=on_device, - matrix=x[0], - rhs=x[1], - lower=lower, + matrix=x[0], + rhs=x[1], + lower=lower, adjoint=adjoint, - ) + ) # BatchMatMul @@ -3547,7 +3547,7 @@ def test_tensorflow_BatchMatMul( test_flags, fn_tree, on_device, - adj_x, + adj_x, adj_y, ): input_dtype, x = dtype_and_x @@ 
-3559,9 +3559,9 @@ def test_tensorflow_BatchMatMul( on_device=on_device, x=x[0], y=x[1], - adj_x=adj_x, + adj_x=adj_x, adj_y=adj_y, - ) + ) # BatchMatMulV2 @@ -3578,7 +3578,7 @@ def test_tensorflow_BatchMatMulV2( test_flags, fn_tree, on_device, - adj_x, + adj_x, adj_y, ): input_dtype, x = dtype_and_x @@ -3590,9 +3590,9 @@ def test_tensorflow_BatchMatMulV2( on_device=on_device, x=x[0], y=x[1], - adj_x=adj_x, + adj_x=adj_x, adj_y=adj_y, - ) + ) # BatchMatMulV3 @@ -3610,7 +3610,7 @@ def test_tensorflow_BatchMatMulV3( fn_tree, on_device, Tout, - adj_x, + adj_x, adj_y, ): input_dtype, x = dtype_and_x @@ -3623,7 +3623,6 @@ def test_tensorflow_BatchMatMulV3( x=x[0], y=x[1], Tout=Tout, - adj_x=adj_x, + adj_x=adj_x, adj_y=adj_y, - ) - + ) \ No newline at end of file diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py index e31b264b07305..efdd9d92735cf 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py @@ -316,12 +316,12 @@ def _get_symmetrix_matrix(draw): test_with_out=st.just(False), ) def test_torch_eigvals( - *, - dtype_x, - frontend, - test_flags, - fn_tree, - on_device, + *, + dtype_x, + frontend, + test_flags, + fn_tree, + on_device, ): input_dtype, x = dtype_x @@ -361,9 +361,9 @@ def test_torch_eigvals( frontend_ret = np.sort(frontend_ret) frontend_ret_modulus = np.zeros(len(frontend_ret), dtype=np.float64) for i in range(len(frontend_ret)): - frontend_ret_modulus[i] = math.sqrt(math.pow(frontend_ret[i].real, - 2) + math.pow(frontend_ret[i].imag, - 2)) + frontend_ret_modulus[i] = math.sqrt( + math.pow(frontend_ret[i].real, 2) + math.pow(frontend_ret[i].imag, 2) + ) ret = ivy.to_numpy(ret).astype(str(frontend_ret.dtype)) ret = np.sort(ret) @@ -994,9 +994,7 @@ def test_torch_tensorsolve( def _lu_factor_helper(draw): # generate input matrix of shape (*, m, n) and where '*' is one or more # batch dimensions - input_dtype = draw( - helpers.get_dtypes("float") - ) + input_dtype = draw(helpers.get_dtypes("float")) dim1 = draw(helpers.ints(min_value=2, max_value=3)) dim2 = draw(helpers.ints(min_value=2, max_value=3)) @@ -1058,7 +1056,7 @@ def test_torch_lu_factor( assert_all_close( ret_np=[LU, pivot], ret_from_gt_np=[frontend_LU, frontend_pivot], - ground_truth_backend=frontend + ground_truth_backend=frontend, ) @@ -1102,15 +1100,16 @@ def _vander_helper(draw): # generate input matrix of shape (*, n) and where '*' is one or more # batch dimensions N = draw(helpers.ints(min_value=2, max_value=5)) - if draw(helpers.floats(min_value=0, max_value=1.)) < 0.5: + if draw(helpers.floats(min_value=0, max_value=1.0)) < 0.5: N = None - shape = draw(helpers.get_shape(min_num_dims=1, - max_num_dims=5, - min_dim_size=2, - max_dim_size=10)) + shape = draw( + helpers.get_shape( + min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10 + ) + ) dtype = "float" - if draw(helpers.floats(min_value=0, max_value=1.)) < 0.5: + if draw(helpers.floats(min_value=0, max_value=1.0)) < 0.5: dtype = "integer" x = draw( @@ -1145,5 +1144,6 @@ def test_torch_vander( fn_tree=fn_tree, on_device=on_device, test_flags=test_flags, - x=x[0], N=N + x=x[0], + N=N, ) diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py index c4c9eec839e7a..fb0214b1fe2d2 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py +++ 
b/ivy_tests/test_ivy/test_frontends/test_torch/test_reduction_ops.py @@ -8,8 +8,9 @@ from ivy_tests.test_ivy.test_functional.test_core.test_statistical import ( statistical_dtype_values, ) -from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_statistical\ - import statistical_dtype_values as statistical_dtype_values_experimental +from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_statistical import ( + statistical_dtype_values as statistical_dtype_values_experimental, +) @handle_frontend_test( diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py index f8fe54a933c09..31344a869edf8 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py @@ -5879,6 +5879,7 @@ def test_torch_instance_bitwise_xor( on_device=on_device, ) + # cumprod @handle_frontend_method( class_tree=CLASS_TREE, diff --git a/ivy_tests/test_ivy/test_functional/test_core/test_dtype.py b/ivy_tests/test_ivy/test_functional/test_core/test_dtype.py index 2edd6fab707c3..279acab66060b 100644 --- a/ivy_tests/test_ivy/test_functional/test_core/test_dtype.py +++ b/ivy_tests/test_ivy/test_functional/test_core/test_dtype.py @@ -886,7 +886,14 @@ def _composition_1(): "complex64", "complex128", ), - "paddle": ("uint16","uint32","uint64","bfloat16","complex64", "complex128",), + "paddle": ( + "uint16", + "uint32", + "uint64", + "bfloat16", + "complex64", + "complex128", + ), } @@ -900,7 +907,12 @@ def _composition_2(): "jax": ("complex64", "complex128"), "tensorflow": ("complex64", "complex128"), "torch": ("uint16", "uint32", "uint64", "float16", "complex64", "complex128"), - "paddle": ("uint16","uint32","uint64","bfloat16",), + "paddle": ( + "uint16", + "uint32", + "uint64", + "bfloat16", + ), } diff --git a/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py b/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py index 6a480066f0886..cd980994bcf1b 100644 --- a/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py +++ b/ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py @@ -22,9 +22,7 @@ def not_too_close_to_zero(x): # abs @handle_test( fn_tree="functional.ivy.abs", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid") - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")), ) def test_abs( *, diff --git a/ivy_tests/test_ivy/test_functional/test_core/test_general.py b/ivy_tests/test_ivy/test_functional/test_core/test_general.py index 6f981677cae5c..bc06e52509b80 100644 --- a/ivy_tests/test_ivy/test_functional/test_core/test_general.py +++ b/ivy_tests/test_ivy/test_functional/test_core/test_general.py @@ -1746,7 +1746,7 @@ def _composition_1(): "complex64", "complex128", ), - "paddle": ("uint16","uint32","uint64","bfloat16","complex64", "complex128"), + "paddle": ("uint16", "uint32", "uint64", "bfloat16", "complex64", "complex128"), }, "gpu": { "numpy": ivy.all_dtypes, @@ -1775,7 +1775,12 @@ def _composition_2(): "jax": ("complex64", "complex128"), "tensorflow": ("complex64", "complex128"), "torch": ("uint16", "uint32", "uint64", "float16", "complex64", "complex128"), - "paddle": ("uint16","uint32","uint64","bfloat16",), + "paddle": ( + "uint16", + "uint32", + "uint64", + "bfloat16", + ), }, "gpu": { "numpy": ivy.all_dtypes, diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py 
b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py index d1a511dbfb1f2..979a7971a88a9 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py @@ -1109,9 +1109,7 @@ def test_hypot( @handle_test( fn_tree="functional.ivy.experimental.binarizer", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid") - ), + dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")), threshold=helpers.floats(), container_flags=st.just([False]), ) diff --git a/ivy_tests/test_ivy/test_misc/test_backend_handler.py b/ivy_tests/test_ivy/test_misc/test_backend_handler.py index 8ff820f34cc14..0354e05049cdb 100644 --- a/ivy_tests/test_ivy/test_misc/test_backend_handler.py +++ b/ivy_tests/test_ivy/test_misc/test_backend_handler.py @@ -55,7 +55,9 @@ if "jax" in available_frameworks(): available_array_types_input.append(("jax", jnp.array(3.0))) if version.parse(jax.__version__) >= version.parse("0.4.1"): - available_array_types_class.append(("jax", "")) + available_array_types_class.append( + ("jax", "") + ) else: available_array_types_class.append( ("jax", "") @@ -222,11 +224,10 @@ def test_dynamic_backend_all_combos(middle_backend, end_backend): # add the necessary asserts to check if the data # of the objects are in the correct format - - assert isinstance(a.data,ivy.current_backend().NativeArray) - assert isinstance(ivy_cont["b"].data,ivy.current_backend().NativeArray) - + assert isinstance(a.data, ivy.current_backend().NativeArray) + assert isinstance(ivy_cont["b"].data, ivy.current_backend().NativeArray) + if end_backend == "numpy": assert isinstance(nativ_cont["b"].data, np.ndarray) elif end_backend == "jax": @@ -235,9 +236,9 @@ def test_dynamic_backend_all_combos(middle_backend, end_backend): if middle_backend not in ("jax", "numpy") and end_backend not in ("jax", "numpy"): # these frameworks don't support native variables assert ivy.current_backend().gradients.is_variable(nativ_cont["b"].data) - + else: - assert isinstance(nativ_cont["b"].data,ivy.current_backend().NativeArray) + assert isinstance(nativ_cont["b"].data, ivy.current_backend().NativeArray) def test_dynamic_backend_setter(): @@ -273,10 +274,10 @@ def test_variables(): ivy.set_backend("torch", dynamic=True) assert ivy.current_backend().gradients.is_variable(dyn_cont["w"].data) - + ivy.set_backend("paddle", dynamic=True) assert ivy.current_backend().gradients.is_variable(dyn_cont["w"].data) - + assert isinstance(stat_cont["w"], tf.Variable) diff --git a/ivy_tests/test_ivy/test_misc/test_func_wrapper.py b/ivy_tests/test_ivy/test_misc/test_func_wrapper.py index 901fa7550a628..4809f1edb7065 100644 --- a/ivy_tests/test_ivy/test_misc/test_func_wrapper.py +++ b/ivy_tests/test_ivy/test_misc/test_func_wrapper.py @@ -95,18 +95,22 @@ def test_integer_arrays_to_float(x, expected): ("x", "weight", "expected"), [ ([[1, 1], [1, 1]], [[1, 1], [1, 1], [1, 1]], True), - ([[1, 1], [1, 1]], [ - [[1, 1], [1, 1], [1, 1]], - [[1, 1], [1, 1], [1, 1]], - [[1, 1], [1, 1], [1, 1]] - ], False), - ] + ( + [[1, 1], [1, 1]], + [ + [[1, 1], [1, 1], [1, 1]], + [[1, 1], [1, 1], [1, 1]], + [[1, 1], [1, 1], [1, 1]], + ], + False, + ), + ], ) def test_handle_mixed_function(x, weight, expected): - test_fn = 'torch.nn.functional.linear' - if ivy.current_backend_str() != 'torch': + test_fn = "torch.nn.functional.linear" + if ivy.current_backend_str() != "torch": # ivy.matmul 
is used inside the compositional implementation - test_fn = 'ivy.matmul' + test_fn = "ivy.matmul" expected = True with patch(test_fn) as test_mock_function: ivy.linear(x, weight) diff --git a/run_tests_CLI/cron_tests_multi_version.py b/run_tests_CLI/cron_tests_multi_version.py index 8d129a5b94963..f664ad1361534 100644 --- a/run_tests_CLI/cron_tests_multi_version.py +++ b/run_tests_CLI/cron_tests_multi_version.py @@ -64,7 +64,7 @@ run_iter = int(sys.argv[1]) os.system( - "docker run -v `pwd`:/ivy -v `pwd`/.hypothesis:/.hypothesis unifyai/ivy:latest python3 -m pytest --disable-pytest-warnings ivy_tests/test_ivy --my_test_dump true > test_names" # noqa + "docker run -v `pwd`:/ivy -v `pwd`/.hypothesis:/.hypothesis unifyai/ivy:latest python3 -m pytest --disable-pytest-warnings ivy_tests/test_ivy --my_test_dump true > test_names" # noqa ) test_names_without_backend = [] test_names = []