diff --git a/tests/python/contrib/test_ethosu/test_codegen.py b/tests/python/contrib/test_ethosu/test_codegen.py
index 13b54b988963..dc54ef071d19 100644
--- a/tests/python/contrib/test_ethosu/test_codegen.py
+++ b/tests/python/contrib/test_ethosu/test_codegen.py
@@ -1132,6 +1132,65 @@ def leaky_relu_func(x):
     )
 
 
+# conv2d + relu_n1_to_1 is used because a separate activation is not offloaded to the NPU.
+def test_tflite_relu_n1_to_1():
+    np.random.seed(0)
+    accel_type = "ethos-u55-256"
+    ifm_shape = (1, 55, 34, 3)
+    kernel_shape = (3, 2)
+    strides = (1, 1)
+
+    @tf.function
+    def conv2d_relu_n1_to_1(x):
+        tf_strides = [1, strides[0], strides[1], 1]
+        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
+        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
+        op = tf.nn.conv2d(
+            x,
+            weight,
+            strides=tf_strides,
+            padding="VALID",
+        )
+        # This specific pattern will be replaced with RELU_N1_TO_1 by the TFLite converter.
+        return tf.math.maximum(-1.0, tf.math.minimum(op, 1.0))
+
+    infra.compare_tvm_with_tflite(
+        conv2d_relu_n1_to_1,
+        [ifm_shape],
+        accel_type,
+        enable_cascader=True,
+    )
+
+
+# conv2d + relu6 is used because a separate activation is not offloaded to the NPU.
+def test_tflite_relu6():
+    np.random.seed(0)
+    accel_type = "ethos-u55-256"
+    ifm_shape = (1, 55, 34, 3)
+    kernel_shape = (3, 2)
+    strides = (1, 1)
+
+    @tf.function
+    def conv2d_relu6(x):
+        tf_strides = [1, strides[0], strides[1], 1]
+        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
+        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
+        op = tf.nn.conv2d(
+            x,
+            weight,
+            strides=tf_strides,
+            padding="VALID",
+        )
+        return tf.nn.relu6(op)
+
+    infra.compare_tvm_with_tflite(
+        conv2d_relu6,
+        [ifm_shape],
+        accel_type,
+        enable_cascader=True,
+    )
+
+
 @pytest.mark.parametrize("accel_type", ACCEL_TYPES)
 @pytest.mark.parametrize("ifm_shape", [(1, 14), (1, 151)])
 @pytest.mark.parametrize("ofm_channels", [32, 64])