[BugFix] fix bug of UserWarnings in test_layer_norm_op.py (#55762)
* update TestAPI arguments to enable param_attr and bias_attr in test_layer_norm_op

* add bf16 condition in test_layer_norm_op

* add fast_math condition
RedContritio authored Jul 31, 2023
1 parent 2931d58 commit 4df4b9f
Showing 1 changed file with 44 additions and 20 deletions.
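For context, the guard pattern this commit applies to the bf16 test cases (the "add bf16 condition" bullet above) looks roughly like the minimal sketch below. The class and test names are illustrative and not taken from the file, and `core` is assumed to be imported the way the test module imports it (e.g. `from paddle.fluid import core` in this release). Note that the checks are ordered so that `or` short-circuits: `core.CUDAPlace(0)` is only constructed when the build actually has CUDA.

import unittest

import paddle
from paddle.fluid import core  # newer Paddle releases expose this as paddle.base.core


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or paddle.is_compiled_with_rocm()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestLayerNormBF16Sketch(unittest.TestCase):  # illustrative name
    def test_guard(self):
        # Only runs on a CUDA (non-ROCm) build whose device 0 supports bfloat16.
        self.assertTrue(core.is_bfloat16_supported(core.CUDAPlace(0)))


if __name__ == "__main__":
    unittest.main()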
64 changes: 44 additions & 20 deletions test/legacy_test/test_layer_norm_op.py
@@ -338,8 +338,10 @@ def initConfig(self):


@unittest.skipIf(
paddle.is_compiled_with_rocm(),
"ROCm doesn't support bf16 LayerNormOpByOp currently",
not core.is_compiled_with_cuda()
or paddle.is_compiled_with_rocm()
or not core.is_bfloat16_supported(core.CUDAPlace(0)),
"core is not compiled with CUDA or not support the bfloat16",
)
class TestLayerNormBF16OpByOpTest_case2(TestLayerNormBF16OpByOpTest):
def initConfig(self):
@@ -383,8 +385,10 @@ def initConfig(self):


@unittest.skipIf(
- paddle.is_compiled_with_rocm(),
- "ROCm doesn't support bf16 LayerNormOpByOp currently",
+ not core.is_compiled_with_cuda()
+ or paddle.is_compiled_with_rocm()
+ or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+ "core is not compiled with CUDA or not support the bfloat16",
)
class TestLayerNormBF16OpByOpTest_case3(TestLayerNormBF16OpByOpTest):
def initConfig(self):
@@ -427,6 +431,12 @@ def initConfig(self):
self.has_bias = True


+ @unittest.skipIf(
+ not core.is_compiled_with_cuda()
+ or paddle.is_compiled_with_rocm()
+ or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+ "core is not compiled with CUDA or not support the bfloat16",
+ )
class TestLayerNormBF16OpByOpTest_case4(TestLayerNormBF16OpByOpTest):
def initConfig(self):
self.ori_atol = 1e-2
@@ -759,8 +769,8 @@ def test_case(self):
)
x = paddle.static.nn.layer_norm(
x,
- scale=False,
- shift=False,
+ scale=True,
+ shift=True,
begin_norm_axis=1,
epsilon=1e-05,
param_attr="scale",
@@ -786,6 +796,10 @@ def test_errors(self):
self.assertRaises(TypeError, layer_norm, x2)


+ @unittest.skipIf(
+ not core.is_compiled_with_cuda(),
+ "core is not compiled with CUDA or not support the float16",
+ )
class TestFP16ScaleBiasLayerNorm(unittest.TestCase):
def check_main(self, x_np, weight_np, bias_np, dtype):
paddle.disable_static()
@@ -810,8 +824,6 @@ def check_main(self, x_np, weight_np, bias_np, dtype):
return y_np, x_g_np, w_g_np, b_g_np

def test_main(self):
- if not paddle.is_compiled_with_cuda():
- return
x_np = np.random.random([10, 20]).astype('float16')
weight_np = np.random.random([20]).astype('float16')
bias_np = np.random.random([20]).astype('float16')
@@ -833,8 +845,10 @@ def assert_equal(x, y):


@unittest.skipIf(
- not core.is_compiled_with_cuda() or paddle.is_compiled_with_rocm(),
- "BF16 is only supported on CUDA.",
+ not core.is_compiled_with_cuda()
+ or paddle.is_compiled_with_rocm()
+ or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+ "core is not compiled with CUDA or not support the bfloat16",
)
class TestBF16ScaleBiasLayerNorm(unittest.TestCase):
def check_main(self, x_np, weight_np, bias_np, dtype):
@@ -863,12 +877,6 @@ def check_main(self, x_np, weight_np, bias_np, dtype):
return y_np, x_g_np, w_g_np, b_g_np

def test_main(self):
- if (
- (not core.is_compiled_with_cuda())
- or (core.cudnn_version() < 8100)
- or (paddle.device.cuda.get_device_capability()[0] < 8)
- ):
- return
x_np = np.random.random([10, 20]).astype('float32')
weight_np = np.random.random([20]).astype('float32')
bias_np = np.random.random([20]).astype('float32')
@@ -898,6 +906,10 @@ def test_main(self):
self.assertTrue(_keep_layer_norm_scale_bias_to_fp32())


+ @unittest.skipIf(
+ not core.is_compiled_with_cuda() or paddle.is_compiled_with_rocm(),
+ "core is not compiled with CUDA or not support the FastMath",
+ )
class TestFastMathLayerNormOp(unittest.TestCase):
def check_layer_norm(
self, dtype, x_np, scale_np, bias_np, norm_axis, has_scale, has_bias
@@ -968,11 +980,23 @@ def check_with_dtype(self, dtype):
has_bias=False,
)

+ def init_dtype(self):
+ self.dtype = 'float32'

def test_main(self):
- if not paddle.is_compiled_with_cuda() or paddle.is_compiled_with_rocm():
- return
- self.check_with_dtype(dtype="float32")
- self.check_with_dtype(dtype="bfloat16")
+ self.init_dtype()
+ self.check_with_dtype(dtype=self.dtype)


+ @unittest.skipIf(
+ not core.is_compiled_with_cuda()
+ or paddle.is_compiled_with_rocm()
+ or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+ "core is not compiled with CUDA or not support the bfloat16",
+ )
+ class TestFastMathLayerNormBF16Op(TestFastMathLayerNormOp):
+ def init_dtype(self):
+ self.dtype = 'bfloat16'


if __name__ == '__main__':
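On the scale/shift change in the test_case hunk above (the first commit bullet): `paddle.static.nn.layer_norm` only creates its gain and bias parameters when `scale` and `shift` are enabled, and it warns when `param_attr`/`bias_attr` are supplied while the corresponding flag is off, which appears to be the UserWarning the commit title refers to. A minimal static-graph sketch of the warning-free call follows; the shape and the `bias_attr` name are illustrative assumptions (only `param_attr="scale"` is visible in the hunk above).

import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    # Illustrative shape: normalize over the trailing dims (begin_norm_axis=1).
    x = paddle.static.data(name='x', shape=[-1, 32, 32], dtype='float32')
    y = paddle.static.nn.layer_norm(
        x,
        scale=True,   # create the elementwise gain parameter
        shift=True,   # create the elementwise bias parameter
        begin_norm_axis=1,
        epsilon=1e-05,
        param_attr="scale",  # parameter name, as in the hunk above
        bias_attr="shift",   # illustrative name; not shown in the visible hunk
    )

With `scale=False` or `shift=False`, the named attribute would be ignored and the call would warn instead, which is presumably why the test now enables both flags.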
