Commit 84d2ffb

[PIR] Migrate paddle.nn.Sigmoid into pir (PaddlePaddle#58144)
MarioLulab authored and wentaoyu committed Oct 24, 2023
1 parent 9cb5b01 commit 84d2ffb
Showing 2 changed files with 22 additions and 13 deletions.

python/paddle/tensor/ops.py (1 addition & 1 deletion)
@@ -916,7 +916,7 @@ def sigmoid(x, name=None):
             Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
             [0.40131235, 0.45016602, 0.52497917, 0.57444251])
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.sigmoid(x)
     else:
         check_variable_and_dtype(
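
The functional change above is the dispatch guard: in_dynamic_or_pir_mode() routes both dynamic-graph execution and the new PIR programs directly to the C++ kernel via _C_ops.sigmoid, while the legacy static-graph path keeps going through check_variable_and_dtype. As a quick sanity check on the docstring output, here is a NumPy sketch (not part of the commit; the input tensor [-0.4, -0.2, 0.1, 0.3] is inferred from the printed values):

    import numpy as np

    # sigmoid(x) = 1 / (1 + exp(-x)); inputs inferred from the docstring output.
    x = np.array([-0.4, -0.2, 0.1, 0.3], dtype=np.float32)
    out = 1.0 / (1.0 + np.exp(-x))
    print(out)  # approx. [0.40131235 0.45016602 0.52497917 0.57444251]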

test/legacy_test/test_activation_op.py (21 additions & 12 deletions)
@@ -388,25 +388,35 @@ def init_dtype(self):
     def if_enable_cinn(self):
         pass
 
+    def test_check_output(self):
+        self.check_output(check_pir=True)
+
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)
+        self.check_grad(
+            ['X'],
+            'Out',
+            max_relative_error=0.01,
+            check_prim=True,
+            check_pir=True,
+        )
 
 
 class TestSigmoid_Complex64(TestSigmoid):
     def init_dtype(self):
         self.dtype = np.complex64
 
     def test_check_output(self):
-        self.check_output(check_prim=False)
+        self.check_output(check_prim=False, check_pir=True)
 
     def test_check_grad(self):
         self.check_grad(
             ['X'],
             'Out',
             max_relative_error=0.006,
             check_prim=False,
+            check_pir=True,
         )


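For readers outside this test suite: check_pir=True asks the OpTest harness to additionally run the operator under the new PIR program representation and compare the result against the NumPy reference. A minimal sketch of what a PIR-enabled sigmoid case could look like (an illustrative reconstruction, not part of this diff; the exact required OpTest attributes are assumptions):

    import numpy as np
    import paddle
    from op_test import OpTest  # test/legacy_test/op_test.py

    class TestSigmoidSketch(OpTest):
        def setUp(self):
            self.op_type = "sigmoid"
            self.python_api = paddle.nn.functional.sigmoid
            # NumPy reference the harness compares against.
            x = np.random.uniform(0.1, 1.0, [11, 17]).astype("float32")
            self.inputs = {'X': x}
            self.outputs = {'Out': 1.0 / (1.0 + np.exp(-x))}

        def test_check_output(self):
            # check_pir=True also exercises the PIR executor path.
            self.check_output(check_pir=True)
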
@@ -415,11 +425,7 @@ def init_dtype(self):
         self.dtype = np.complex128
 
     def test_check_grad(self):
-        self.check_grad(
-            ['X'],
-            'Out',
-            check_prim=False,
-        )
+        self.check_grad(['X'], 'Out', check_prim=False, check_pir=True)
 
 
 class TestSigmoid_ZeroDim(TestSigmoid):
@@ -460,12 +466,13 @@ def if_enable_cinn(self):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        # elementwise_pow doesn't support bfloat16, skip check_prim here.
-        self.check_output_with_place(place, check_prim=True)
+        self.check_output_with_place(place, check_prim=True, check_pir=True)
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
+        self.check_grad_with_place(
+            place, ['X'], 'Out', check_prim=True, check_pir=True
+        )
 
 
 '''
@@ -4652,7 +4659,9 @@ def test_check_grad(self):
     TestExpFp32_Prim, check_prim=True, enable_cinn=True, check_prim_pir=True
 )
 create_test_act_fp16_class(TestExpm1)
-create_test_act_fp16_class(TestSigmoid, check_prim=True, enable_cinn=True)
+create_test_act_fp16_class(
+    TestSigmoid, check_prim=True, enable_cinn=True, check_pir=True
+)
 create_test_act_fp16_class(
     TestSilu, check_prim=True, enable_cinn=True, check_prim_pir=True
 )
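
create_test_act_fp16_class is a class factory: it derives an FP16 variant of an existing activation test and registers it in the module namespace, forwarding flags such as check_pir to the derived checks. A hedged sketch of that pattern (the real helper is defined earlier in test_activation_op.py; the name, tolerance, and body below are illustrative assumptions):

    import numpy as np

    def create_fp16_variant(parent, check_pir=False, atol=1e-3):
        # Illustrative sketch of the factory pattern, not the actual helper.
        class TestActFp16(parent):
            def init_dtype(self):
                self.dtype = np.float16

            def test_check_output(self):
                self.check_output(atol=atol, check_pir=check_pir)

        TestActFp16.__name__ = parent.__name__ + '_Fp16'
        globals()[TestActFp16.__name__] = TestActFp16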
@@ -4822,7 +4831,7 @@ def test_check_grad(self):
     TestExpFp32_Prim, check_prim=True, check_prim_pir=True
 )
 create_test_act_bf16_class(TestExpm1)
-create_test_act_bf16_class(TestSigmoid, check_prim=True)
+create_test_act_bf16_class(TestSigmoid, check_prim=True, check_pir=True)
 create_test_act_bf16_class(TestSilu, check_prim=True, check_prim_pir=True)
 create_test_act_bf16_class(TestLogSigmoid)
 create_test_act_bf16_class(TestTanh, check_prim=True, check_prim_pir=True)
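
To run just the sigmoid cases locally, one possible invocation (an assumption, not taken from the commit; the bfloat16 cases additionally require a CUDA build) is:

    cd test/legacy_test
    python -m unittest -k Sigmoid test_activation_op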
