diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 319504a8db2702..ae3d6121061a64 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -4271,7 +4271,7 @@ def cumprod(x, dim=None, dtype=None, name=None):
     if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
         x = cast(x, dtype)
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.cumprod(x, dim)
     else:
         check_variable_and_dtype(
@@ -4981,7 +4981,7 @@ def digamma(x, name=None):
             [ nan       ,  5.32286835]])
     """
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.digamma(x)
     else:
         check_variable_and_dtype(
@@ -5337,7 +5337,7 @@ def erfinv(x, name=None):
            [ 0.        ,  0.47693631, -inf.      ])
 
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.erfinv(x)
     else:
         check_variable_and_dtype(
diff --git a/test/legacy_test/test_cumprod_op.py b/test/legacy_test/test_cumprod_op.py
index da3db1ee1ef6f6..d51d1af65837f8 100644
--- a/test/legacy_test/test_cumprod_op.py
+++ b/test/legacy_test/test_cumprod_op.py
@@ -124,7 +124,7 @@ def test_check_output(self):
         for dim in range(-len(self.shape), len(self.shape)):
             for zero_num in self.zero_nums:
                 self.prepare_inputs_outputs_attrs(dim, zero_num)
-                self.check_output()
+                self.check_output(check_pir=True)
 
     # test backward.
     def test_check_grad(self):
@@ -133,13 +133,14 @@ def test_check_grad(self):
                 self.prepare_inputs_outputs_attrs(dim, zero_num)
                 self.init_grad_input_output(dim)
                 if self.dtype == np.float64:
-                    self.check_grad(['X'], 'Out')
+                    self.check_grad(['X'], 'Out', check_pir=True)
                 else:
                     self.check_grad(
                         ['X'],
                         'Out',
                         user_defined_grads=[self.grad_x],
                         user_defined_grad_outputs=[self.grad_out],
+                        check_pir=True,
                     )
 
 
diff --git a/test/legacy_test/test_digamma_op.py b/test/legacy_test/test_digamma_op.py
index 04bb768a5b179e..a470e2172be223 100644
--- a/test/legacy_test/test_digamma_op.py
+++ b/test/legacy_test/test_digamma_op.py
@@ -42,10 +42,10 @@ def init_dtype_type(self):
         self.dtype = np.float64
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)
 
 
 class TestDigammaOpFp32(TestDigammaOp):
@@ -53,7 +53,7 @@ def init_dtype_type(self):
         self.dtype = np.float32
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)
 
 
 class TestDigammaFP16Op(TestDigammaOp):
@@ -87,10 +87,12 @@ def init_dtype_type(self):
 
     def test_check_output(self):
         # bfloat16 needs to set the parameter place
-        self.check_output_with_place(core.CUDAPlace(0))
+        self.check_output_with_place(core.CUDAPlace(0), check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad_with_place(core.CUDAPlace(0), ['X'], 'Out')
+        self.check_grad_with_place(
+            core.CUDAPlace(0), ['X'], 'Out', check_pir=True
+        )
 
 
 class TestDigammaAPI(unittest.TestCase):
diff --git a/test/legacy_test/test_erfinv_op.py b/test/legacy_test/test_erfinv_op.py
index 3108f8520d5328..e9eb1d668ada8c 100644
--- a/test/legacy_test/test_erfinv_op.py
+++ b/test/legacy_test/test_erfinv_op.py
@@ -44,7 +44,7 @@ def init_dtype(self):
         self.dtype = np.float64
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.check_grad(
@@ -52,6 +52,7 @@ def test_check_grad(self):
             'Out',
             user_defined_grads=[self.gradient],
             user_defined_grad_outputs=self.grad_out,
+            check_pir=True,
         )
 
 
@@ -143,15 +144,11 @@ def setUp(self):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_pir=True)
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(
-            place,
-            ['X'],
-            'Out',
-        )
+        self.check_grad_with_place(place, ['X'], 'Out', check_pir=True)
 
 
 if __name__ == "__main__":