Add an in-place `cumsum_` method to ivy's torch Tensor frontend, with a
hypothesis-based frontend test.

- tensor.py: `cumsum_(self, dim, *, dtype=None)` delegates to the existing
  `cumsum` method (visible in the hunk context) and rebinds `self._ivy_array`
  to the result's underlying ivy array, returning `self` — torch's in-place
  convention of mutating and returning the same tensor object. float16 is
  declared unsupported for torch "1.11.0 and below" via
  `with_unsupported_dtypes`.
- test_tensor.py: `test_torch_instance_cumsum_` is added directly after the
  existing `test_torch_instance_cumsum` it mirrors. The drawn shape is shared
  between the value strategy and the axis strategy through
  `st.shared(..., key="shape")`, so `dim` is always a valid (possibly
  negative) axis of the generated array; the method is exercised with
  `dtype` fixed to the input's own dtype.
  NOTE(review): `helpers.get_axis` re-declares the shared key with a default
  `get_shape()` (no `min_num_dims`) while the values use `min_num_dims=1`;
  the first-registered strategy governs the shared draw — presumed
  intentional, as this pattern recurs in this test suite.

diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py
index 70ad4c2a19434..fc31fe3f06f63 100644
--- a/ivy/functional/frontends/torch/tensor.py
+++ b/ivy/functional/frontends/torch/tensor.py
@@ -530,6 +530,11 @@ def flatten(self, start_dim, end_dim):
     def cumsum(self, dim, dtype):
         return torch_frontend.cumsum(self._ivy_array, dim, dtype=dtype)
 
+    @with_unsupported_dtypes({"1.11.0 and below": ("float16",)}, "torch")
+    def cumsum_(self, dim, *, dtype=None):
+        self._ivy_array = self.cumsum(dim, dtype).ivy_array
+        return self
+
     def inverse(self):
         return torch_frontend.inverse(self._ivy_array)
 
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
index 2a91b7fe175c8..d692b28c1046c 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
@@ -3709,6 +3709,47 @@ def test_torch_instance_cumsum(
     )
 
 
+# cumsum_
+@handle_frontend_method(
+    class_tree=CLASS_TREE,
+    init_tree="torch.tensor",
+    method_name="cumsum_",
+    dtype_value=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("numeric"),
+        shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
+    ),
+    dim=helpers.get_axis(
+        shape=st.shared(helpers.get_shape(), key="shape"),
+        allow_neg=True,
+        force_int=True,
+    ),
+)
+def test_torch_instance_cumsum_(
+    dtype_value,
+    dim,
+    frontend_method_data,
+    init_flags,
+    method_flags,
+    frontend,
+):
+    input_dtype, x = dtype_value
+    helpers.test_frontend_method(
+        init_input_dtypes=input_dtype,
+        init_all_as_kwargs_np={
+            "data": x[0],
+        },
+        method_input_dtypes=input_dtype,
+        method_all_as_kwargs_np={
+            "dim": dim,
+            "dtype": input_dtype[0],
+        },
+        frontend_method_data=frontend_method_data,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        frontend=frontend,
+    )
+
+
 # sort
 @handle_frontend_method(
     class_tree=CLASS_TREE,