[CMSIS-NN] Global function that provides range based on dtype (apache#13652)

The (min, max) range for a dtype was previously computed
in different ways across the CMSIS-NN tests. This commit
adds a common global function to aot.py under tvm.testing
that returns the (min, max) values for a given dtype. In
the future, other AOT-based targets can use this function
to obtain the range.
ashutosh-arm authored and Mikael Sevenier committed Dec 29, 2022
1 parent 8738db9 commit c09944c
Showing 13 changed files with 85 additions and 102 deletions.
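The new helper simply dispatches to NumPy's type-info queries. As an illustration only (not part of the commit), a minimal sketch of how it behaves, using the tvm.testing.aot import path introduced in the diff below:

    import numpy as np
    from tvm.testing.aot import get_dtype_range

    # Signed and unsigned integer dtypes are resolved via np.iinfo ...
    assert get_dtype_range("int8") == (-128, 127)
    assert get_dtype_range("uint8") == (0, 255)

    # ... while floating-point dtypes use np.finfo.
    fmin, fmax = get_dtype_range("float32")  # approximately (-3.4e38, 3.4e38)

    # Any other kind (e.g. "bool" or "complex64") raises TypeError.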
31 changes: 30 additions & 1 deletion python/tvm/testing/aot.py
@@ -24,7 +24,7 @@
import subprocess
import tarfile
import logging
from typing import Any, NamedTuple, Union, Optional, List, Dict
from typing import Any, NamedTuple, Union, Tuple, Optional, List, Dict
import numpy as np

import tvm
@@ -901,6 +901,35 @@ def compile_and_run(
)


def get_dtype_range(dtype: str) -> Tuple[int, int]:
    """
    Produces the (min, max) range for a given data type.

    Parameters
    ----------
    dtype : str
        a type string (e.g., int8, float64)

    Returns
    -------
    type_info.min : int
        the minimum of the range
    type_info.max : int
        the maximum of the range
    """
    type_info = None
    np_dtype = np.dtype(dtype)
    kind = np_dtype.kind

    if kind == "f":
        type_info = np.finfo(np_dtype)
    elif kind in ["i", "u"]:
        type_info = np.iinfo(np_dtype)
    else:
        raise TypeError(f"dtype ({dtype}) must indicate some floating-point or integral data type.")
    return type_info.min, type_info.max


def generate_ref_data(mod, input_data, params=None, target="llvm"):
"""Generate reference data through executing the relay module"""
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
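Across the test files that follow, the change is mechanical: each test stops importing get_range_for_dtype_str from the local .utils module and instead feeds the bounds returned by the shared function to NumPy's random generators. A sketch of that pattern (shape and seed are illustrative only, not taken from any single test):

    import numpy as np
    from tvm.testing.aot import get_dtype_range

    dtype = "int8"
    in_min, in_max = get_dtype_range(dtype)

    # Random input tensor drawn from the dtype's range, as the CMSIS-NN tests do below.
    rng = np.random.default_rng(12321)
    input_data = rng.integers(in_min, high=in_max, size=(1, 16, 16, 3), dtype=dtype)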
14 changes: 6 additions & 8 deletions tests/python/contrib/test_cmsisnn/test_binary_ops.py
@@ -25,7 +25,7 @@
import tvm
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import generate_ref_data, AOTTestModel, compile_and_run
from tvm.testing.aot import get_dtype_range, generate_ref_data, AOTTestModel, compile_and_run
from tvm.micro.testing.aot_test_utils import (
AOT_USMP_CORSTONE300_RUNNER,
)
@@ -34,7 +34,6 @@
skip_if_no_reference_system,
make_module,
make_qnn_relu,
get_range_for_dtype_str,
assert_partitioned_function,
assert_no_external_function,
create_test_runner,
@@ -45,9 +44,8 @@ def generate_tensor_constant():
rng = np.random.default_rng(12321)
dtype = "int8"
shape = (1, 16, 16, 3)
values = tvm.nd.array(
rng.integers(np.iinfo(dtype).min, high=np.iinfo(dtype).max, size=shape, dtype=dtype)
)
in_min, in_max = get_dtype_range(dtype)
values = tvm.nd.array(rng.integers(in_min, high=in_max, size=shape, dtype=dtype))
return relay.const(values, dtype)


@@ -136,7 +134,7 @@ def test_op_int8(
assert_partitioned_function(orig_mod, cmsisnn_mod)

# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)
inputs = {
"input_0": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
"input_1": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
@@ -196,7 +194,7 @@ def test_same_input_to_binary_op(op, relu_type):
), "Composite function for the binary op should have only 1 parameter."

# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)
inputs = {
"input": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype),
}
@@ -275,7 +273,7 @@ def test_constant_input_int8(op, input_0, input_1):
assert_partitioned_function(orig_mod, cmsisnn_mod)

# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)
inputs = {}
if isinstance(input_0, tvm.relay.expr.Var):
inputs.update({"input_0": np.random.randint(in_min, high=in_max, size=shape, dtype=dtype)})
21 changes: 11 additions & 10 deletions tests/python/contrib/test_cmsisnn/test_conv2d.py
@@ -24,6 +24,7 @@
from tvm.relay.op.contrib import cmsisnn

from tvm.testing.aot import (
get_dtype_range,
generate_ref_data,
AOTTestModel,
compile_models,
@@ -33,7 +34,6 @@
from tvm.micro.testing.aot_test_utils import AOT_USMP_CORSTONE300_RUNNER
from .utils import (
make_module,
get_range_for_dtype_str,
get_same_padding,
get_conv2d_qnn_params,
get_kernel_bias_dtype,
@@ -82,10 +82,11 @@ def make_model(
p = get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)

rng = np.random.default_rng(12321)
kmin, kmax = get_dtype_range(kernel_dtype)
kernel = tvm.nd.array(
rng.integers(
np.iinfo(kernel_dtype).min,
high=np.iinfo(kernel_dtype).max,
kmin,
high=kmax,
size=kernel_shape,
dtype=kernel_dtype,
)
@@ -157,7 +158,7 @@ def test_conv2d_number_primfunc_args(
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)
relu_type = "RELU"

kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
@@ -264,7 +265,7 @@ def test_conv2d_symmetric_padding(
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)

kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)

@@ -358,7 +359,7 @@ def test_conv2d_asymmetric_padding(
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)

kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)

@@ -454,7 +455,7 @@ def test_pad_conv2d_fusion_int8(
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)

kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)
output_scale, output_zero_point = get_conv2d_qnn_params(
@@ -567,7 +568,7 @@ def test_invalid_pad_conv2d_fusion_int8(
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)

kernel_dtype, bias_dtype = get_kernel_bias_dtype(dtype)

@@ -740,7 +741,7 @@ def test_depthwise(
kernel_w = kernel_size[1]
kernel_shape = (kernel_h, kernel_w, ifm_shape[3] // groups, out_channels)
kernel_zero_point = 0
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)

groups = ifm_shape[3]
kernel_layout = "HWOI"
@@ -844,7 +845,7 @@ def test_relay_conv2d_cmsisnn_depthwise_int8(
test_runner = AOT_USMP_CORSTONE300_RUNNER

dtype = "int8"
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)

ifm_shape = (1, 24, 24, 1)
groups = ifm_shape[3]
10 changes: 5 additions & 5 deletions tests/python/contrib/test_cmsisnn/test_fully_connected.py
@@ -23,10 +23,9 @@
from tvm import relay
from tvm.relay.op.contrib import cmsisnn

from tvm.testing.aot import generate_ref_data, AOTTestModel, compile_and_run
from tvm.testing.aot import get_dtype_range, generate_ref_data, AOTTestModel, compile_and_run
from .utils import (
make_module,
get_range_for_dtype_str,
get_conv2d_qnn_params,
make_qnn_relu,
assert_partitioned_function,
@@ -55,10 +54,11 @@ def make_model(
"""Return a model and any parameters it may have"""
input_ = relay.var("input", shape=in_shape, dtype=dtype)
rng = np.random.default_rng(12321)
kmin, kmax = get_dtype_range(kernel_dtype)
weight = tvm.nd.array(
rng.integers(
np.iinfo(kernel_dtype).min,
high=np.iinfo(kernel_dtype).max,
kmin,
high=kmax,
size=kernel_shape,
dtype=kernel_dtype,
)
@@ -123,7 +123,7 @@ def test_ops(
kernel_zero_point = 0
kernel_shape = [out_channels, in_shape[1]]
conv2d_kernel_shape = (1, 1, kernel_shape[0], kernel_shape[1])
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)

output_scale, output_zero_point = get_conv2d_qnn_params(
conv2d_kernel_shape,
22 changes: 13 additions & 9 deletions tests/python/contrib/test_cmsisnn/test_fuse_pads.py
@@ -19,7 +19,7 @@
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm.testing.aot import get_dtype_range
from tvm import relay
from .utils import CheckForPadsWithinCompositeFunc

@@ -59,10 +59,11 @@ def test_invalid_padding_for_fusion(ifm_shape, pad_width, conv2d_padding, ofm_sh
pad_mode="constant",
)
rng = np.random.default_rng(12321)
in_min, in_max = get_dtype_range(dtype)
local_weight = tvm.nd.array(
rng.integers(
np.iinfo(dtype).min,
high=np.iinfo(dtype).max,
in_min,
high=in_max,
size=(ofm_channels, kernel_size[0], kernel_size[1], ifm_shape[3]),
dtype=dtype,
)
@@ -139,10 +140,11 @@ def test_pad_conv2d_fusion_noncmsisnn_target(ifm_shape, pad_width, conv2d_paddin
pad_mode="constant",
)
rng = np.random.default_rng(12321)
in_min, in_max = get_dtype_range(dtype)
local_weight = tvm.nd.array(
rng.integers(
np.iinfo(dtype).min,
high=np.iinfo(dtype).max,
in_min,
high=in_max,
size=(ofm_channels, kernel_size[0], kernel_size[1], ifm_shape[3]),
dtype=dtype,
)
@@ -217,10 +219,11 @@ def test_pad_conv2d_fusion(ifm_shape, pad_width, conv2d_padding, ofm_shape):
pad_mode="constant",
)
rng = np.random.default_rng(12321)
kmin, kmax = get_dtype_range(dtype)
local_weight = tvm.nd.array(
rng.integers(
np.iinfo(dtype).min,
high=np.iinfo(dtype).max,
kmin,
high=kmax,
size=(ofm_channels, kernel_size[0], kernel_size[1], ifm_shape[3]),
dtype=dtype,
)
@@ -281,10 +284,11 @@ def test_without_preceding_pad():
ofm_shape = (1, 56, 56, 64)
local_input = relay.var("local_input", shape=ifm_shape, dtype=dtype)
rng = np.random.default_rng(12321)
kmin, kmax = get_dtype_range(dtype)
local_weight = tvm.nd.array(
rng.integers(
np.iinfo(dtype).min,
high=np.iinfo(dtype).max,
kmin,
high=kmax,
size=(64, 3, 3, 64),
dtype=dtype,
)
7 changes: 4 additions & 3 deletions tests/python/contrib/test_cmsisnn/test_generate_constants.py
@@ -20,7 +20,7 @@
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm.testing.aot import get_dtype_range
from tvm import relay
from tvm.relay.op.contrib import cmsisnn

@@ -107,10 +107,11 @@ def make_model(

weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)
rng = np.random.default_rng(12321)
kmin, kmax = get_dtype_range(kernel_dtype)
weight = tvm.nd.array(
rng.integers(
np.iinfo(kernel_dtype).min,
high=np.iinfo(kernel_dtype).max,
kmin,
high=kmax,
size=weight_shape,
dtype=kernel_dtype,
)
5 changes: 2 additions & 3 deletions tests/python/contrib/test_cmsisnn/test_invalid_graphs.py
@@ -19,13 +19,12 @@
import numpy as np
import tvm

from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.testing.aot import AOTTestModel, get_dtype_range, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_USMP_CORSTONE300_RUNNER,
)
from .utils import (
skip_if_no_reference_system,
get_range_for_dtype_str,
)


@@ -58,7 +57,7 @@ def @main(%data : Tensor[(16, 29), int8]) -> Tensor[(16, 29), int8] {
use_unpacked_api = True
test_runner = AOT_USMP_CORSTONE300_RUNNER
dtype = "int8"
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)
rng = np.random.default_rng(12345)
inputs = {"data": rng.integers(in_min, high=in_max, size=(16, 29), dtype=dtype)}
outputs = generate_ref_data(orig_mod["main"], inputs, params)
6 changes: 3 additions & 3 deletions tests/python/contrib/test_cmsisnn/test_networks.py
@@ -24,12 +24,12 @@
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.testing.aot import AOTTestModel, get_dtype_range, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
AOT_USMP_CORSTONE300_RUNNER,
)
from .utils import skip_if_no_reference_system, get_range_for_dtype_str
from .utils import skip_if_no_reference_system

# pylint: disable=import-outside-toplevel
def _convert_to_relay(
@@ -93,7 +93,7 @@ def test_cnn_small(test_runner):

input_shape = (1, 490)
dtype = "int8"
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)
rng = np.random.default_rng(12345)
input_data = rng.integers(in_min, high=in_max, size=input_shape, dtype=dtype)

4 changes: 2 additions & 2 deletions tests/python/contrib/test_cmsisnn/test_pooling.py
@@ -23,14 +23,14 @@
from tvm.relay.op.contrib import cmsisnn

from tvm.testing.aot import (
get_dtype_range,
generate_ref_data,
AOTTestModel,
compile_and_run,
)
from tvm.micro.testing.aot_test_utils import AOT_USMP_CORSTONE300_RUNNER
from .utils import (
make_module,
get_range_for_dtype_str,
get_same_padding,
make_qnn_relu,
assert_partitioned_function,
@@ -128,7 +128,7 @@ def test_ops(
assert_partitioned_function(orig_mod, cmsisnn_mod)

# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
in_min, in_max = get_dtype_range(dtype)
np.random.seed(0)
inputs = {
"input": np.random.randint(in_min, high=in_max, size=in_shape, dtype=dtype),
(Remaining file diffs not loaded.)
