diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py
index 2db2826ac45ee..a364b4efc6204 100644
--- a/ivy/functional/frontends/torch/tensor.py
+++ b/ivy/functional/frontends/torch/tensor.py
@@ -266,6 +266,9 @@ def abs(self):
     def abs_(self):
         self._ivy_array = self.abs().ivy_array
         return self
+
+    def bitwise_not(self, *, out=None):
+        return torch_frontend.bitwise_not(self._ivy_array, out=out)
 
     def bitwise_and(self, other):
         return torch_frontend.bitwise_and(self._ivy_array, other)
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
index e231384b9553d..f6c56acd9f83a 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
@@ -2620,6 +2620,39 @@ def test_torch_instance_is_cuda(
     )
 
 
+# bitwise_not
+@handle_frontend_method(
+    class_tree=CLASS_TREE,
+    init_tree="torch.tensor",
+    method_name="bitwise_not",
+    dtype_and_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("integer"),
+        num_arrays=1,
+    ),
+)
+def test_torch_instance_bitwise_not(
+    dtype_and_x,
+    frontend_method_data,
+    init_flags,
+    method_flags,
+    frontend,
+):
+    input_dtype, x = dtype_and_x
+    helpers.test_frontend_method(
+        init_input_dtypes=input_dtype,
+        init_all_as_kwargs_np={
+            "data": x[0],
+        },
+        method_input_dtypes=input_dtype,
+        frontend_method_data=frontend_method_data,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        method_all_as_kwargs_np={
+        },
+        frontend=frontend,
+    )
+
+
 # bitwise_and
 @handle_frontend_method(
     class_tree=CLASS_TREE,