[ETHOSN] Stricter data type conversion checks (apache#10271)
The 21.11 update for the Ethos(TM)-N driver is slightly stricter about which operator attributes it accepts.
leo-blonk authored and pfk-beta committed Apr 11, 2022
1 parent e7d6c49 commit b3c63f7
Showing 6 changed files with 31 additions and 18 deletions.
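
As context for the diff below: most of these changes tighten validation of operator attributes (dilation, strides, padding, layout) before they are handed to the Support Library. A minimal, self-contained sketch of that kind of check, using plain standard-library types rather than the TVM or Support Library classes (the function name and message are illustrative, not taken from the code base):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Accept a convolution dilation attribute only if it is exactly {1, 1},
// mirroring the stricter style of check used in this backend.
std::string CheckDilation(const std::vector<uint32_t>& dilation) {
  if (dilation.size() != 2 || dilation[0] != 1 || dilation[1] != 1) {
    return "dilation must be exactly {1, 1}";
  }
  return "";  // an empty string means the attribute is accepted
}

int main() {
  std::cout << "{1, 1} -> '" << CheckDilation({1, 1}) << "'\n";  // accepted
  std::cout << "{2, 2} -> '" << CheckDilation({2, 2}) << "'\n";  // rejected
}
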
2 changes: 2 additions & 0 deletions src/relay/backend/contrib/ethosn/codegen.cc
@@ -718,6 +718,7 @@ TVM_REGISTER_GLOBAL("relay.ethos-n.support.reshape")
.set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) {
Call call = args[0];
ReshapeParams params;
EthosnAPI::DefaultInputTensor(call);
auto err = EthosnAPI::Reshape(call, &params);
err += EthosnCompiler::SupportedSetup();
char reason[kReasonMaxLength];
@@ -784,6 +785,7 @@ TVM_REGISTER_GLOBAL("relay.ethos-n.support.split")
.set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) {
Call call = args[0];
SplitParams params;
EthosnAPI::DefaultInputTensor(call);
auto err = EthosnAPI::Split(call, &params);
err += EthosnCompiler::SupportedSetup();
char reason[kReasonMaxLength];
32 changes: 19 additions & 13 deletions src/relay/backend/contrib/ethosn/ethosn_api.cc
@@ -45,6 +45,14 @@ namespace relay {
namespace contrib {
namespace ethosn {

sl::TensorInfo EthosnAPI::DefaultInputTensor(const Expr& expr) {
Call call = Downcast<Call>(expr);
const auto* dtype = call->args[0]->checked_type().as<TensorTypeNode>();
sl::DataType data_type;
Tvm2Npu(dtype->dtype, &data_type);
return sl::TensorInfo({}, data_type, sl::DataFormat::NHWC, {});
}

EthosnError EthosnAPI::QnnConv2d(const Expr& expr, ConvolutionParams* params) {
Call requantize = Downcast<Call>(expr);
Call bias_add = Downcast<Call>(requantize->args[0]);
@@ -108,7 +116,7 @@ EthosnError EthosnAPI::QnnConv2d(const Expr& expr, ConvolutionParams* params) {
sl::Stride stride;
err += Tvm2Npu(conv_attr->strides, &stride);
// Dilation is not supported
std::array<uint32_t, 4> dilation = {1, 1, 1, 1};
std::array<uint32_t, 2> dilation = {1, 1};
AsArray(conv_attr->dilation, &dilation);
if (conv_attr->dilation.size() != 2 || dilation[0] != 1 || dilation[1] != 1) {
err +=
@@ -485,9 +493,6 @@ EthosnError EthosnAPI::DepthToSpace(const Expr& expr, DepthToSpaceParams* params
EthosnError err = Tvm2Npu(input_dtype->shape, &input_tensor_shape);
err += Tvm2Npu(input_dtype->dtype, &input_data_type);
err += Tvm2Npu(attrs->layout, &input_data_format);
if (input_data_format != sl::DataFormat::NHWC) {
err += EthosnError(ErrStrm() << "layout=" << attrs->layout << ", layout must = NHWC");
}
params->input_info = sl::TensorInfo(input_tensor_shape, input_data_type, input_data_format,
params->input_info.m_QuantizationInfo);
return err;
@@ -517,11 +522,11 @@ EthosnError EthosnAPI::Tvm2Npu(const Array<IndexExpr>& padding, sl::Padding* npu
}
switch (padding.size()) {
case 1:
*npu_padding = sl::Padding(dim[0], dim[0], dim[0], dim[0]);
*npu_padding = sl::Padding(dim[3], dim[3], dim[3], dim[3]);
break;
case 2:
// Height, width -> top, bottom, left, right
*npu_padding = sl::Padding(dim[0], dim[0], dim[1], dim[1]);
*npu_padding = sl::Padding(dim[3], dim[3], dim[2], dim[2]);
break;
case 4:
// Top, left, bottom, right -> top, bottom, left, right
@@ -538,7 +543,7 @@ EthosnError EthosnAPI::Tvm2Npu(const Array<IndexExpr>& strides, sl::Stride* npu_
if (strides.size() != 2) {
return EthosnError(ErrStrm() << "stride size=" << strides.size() << ", stride size must = 2");
}
std::array<uint32_t, 4> dim;
std::array<uint32_t, 2> dim;
if (EthosnError err = AsArray<IndexExpr, uint32_t>(strides, &dim)) {
return err;
}
@@ -550,7 +555,7 @@ EthosnError EthosnAPI::Tvm2Npu(const Array<IndexExpr>& size, uint32_t* x, uint32
if (size.size() != 2) {
return EthosnError(ErrStrm() << "dimensions=" << size.size() << ", dimensions must = 2");
}
std::array<uint32_t, 4> dim;
std::array<uint32_t, 2> dim;
if (EthosnError err = AsArray<IndexExpr, uint32_t>(size, &dim)) {
return err;
}
@@ -647,11 +652,12 @@ EthosnError EthosnAPI::Tvm2Npu(const Array<Array<Integer>>& padding, sl::Padding
// Convert an array of IntImmNodes into ValueT
// IndexT type of Array indexing variable
// ValueT type of resulting value
template <typename IndexT, typename ValueT>
EthosnError EthosnAPI::AsArray(const Array<IndexT>& arr, std::array<ValueT, 4>* v) {
if (arr.size() > 4)
return EthosnError(ErrStrm() << "dimensions=" << arr.size() << ", dimensions must be <= 4");
for (size_t i = 0; i < std::min(arr.size(), 4ul); i++) {
// N The size of the output array
template <typename IndexT, typename ValueT, size_t N>
EthosnError EthosnAPI::AsArray(const Array<IndexT>& arr, std::array<ValueT, N>* v) {
if (arr.size() > N)
return EthosnError(ErrStrm() << "dimensions=" << arr.size() << ", dimensions must be <= " << N);
for (size_t i = 0; i < arr.size(); i++) {
const PrimExpr& a = arr[i];
const auto* intImm = a.as<IntImmNode>();
if (intImm->value > std::numeric_limits<ValueT>::max()) {
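
The most reused piece of this file's change is the extra template parameter N on AsArray, which lets each call site state exactly how many values it expects instead of always padding out to four. A standalone sketch of the same pattern with standard-library types only (the signature and error handling below are illustrative, not the actual TVM or Support Library API):

#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Copy a runtime-sized attribute array into a fixed-size std::array,
// rejecting inputs that have more than N elements or values that do not
// fit in the destination type.
template <typename ValueT, size_t N>
bool AsArray(const std::vector<int64_t>& arr, std::array<ValueT, N>* v) {
  if (arr.size() > N) return false;
  for (size_t i = 0; i < arr.size(); ++i) {
    if (arr[i] > std::numeric_limits<ValueT>::max()) return false;
    (*v)[i] = static_cast<ValueT>(arr[i]);
  }
  return true;
}

int main() {
  std::array<uint32_t, 2> stride{};                     // strides carry exactly 2 values
  std::cout << AsArray({2, 2}, &stride) << "\n";        // 1: fits in N = 2
  std::cout << AsArray({1, 1, 1, 1}, &stride) << "\n";  // 0: rejected, 4 > 2
}
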
7 changes: 5 additions & 2 deletions src/relay/backend/contrib/ethosn/ethosn_api.h
@@ -179,6 +179,9 @@ class EthosnError {
*/
class EthosnAPI {
public:
/*! \brief Create a default input tensor */
static sl::TensorInfo DefaultInputTensor(const Expr& expr);

/*! \brief Extract the Support Library convolution params from an ethos-n.qnn_conv2d func */
static EthosnError QnnConv2d(const Expr& expr, ConvolutionParams* params);
/*! \brief Extract the Support Library dense params from an ethos-n.qnn_fc func */
@@ -235,8 +238,8 @@ class EthosnAPI {
// Convert an array of IntImmNodes into ValueT
// IndexT type of Array indexing variable
// ValueT type of resulting value
template <typename IndexT, typename ValueT>
static EthosnError AsArray(const Array<IndexT>& arr, std::array<ValueT, 4>* v);
template <typename IndexT, typename ValueT, size_t N>
static EthosnError AsArray(const Array<IndexT>& arr, std::array<ValueT, N>* v);

// Get a T from a constant represented by a NDArray.
template <typename T>
2 changes: 1 addition & 1 deletion tests/python/contrib/test_ethosn/test_depth_to_space.py
@@ -69,7 +69,7 @@ def test_depth_to_space_failure():
"dtype='int16', dtype must be either uint8, int8 or int32;",
),
((1, 16, 16, 16), 4, "uint8", "NHWC", "Only block size of 2 is supported"),
((1, 16, 16, 16), 2, "uint8", "NCHW", "layout=NCHW, layout must = NHWC"),
((1, 16, 16, 16), 2, "uint8", "NCHW", "Input layer must be NHWC or NHWCB"),
]

for shape, block, dtype, layout, err_msg in trials:
3 changes: 2 additions & 1 deletion tests/python/contrib/test_ethosn/test_networks.py
@@ -228,7 +228,8 @@ def test_ssd_mobilenet_v1():
# on hardware that isn't available in CI.
_compile_hash = {"5ee8ed6af9a7f31fc14957b51a8e7423", "e6a91ccc47ba4c6b4614fcd676bd726f"}
if tei.get_ethosn_api_version() == 2111:
_compile_hash = {"afb68ca8f452d1f4a674b457b5e30f59", "a37f900601b9493bd142e8aed16205e5"}
# TODO(Leo-arm): review split operator
_compile_hash = {"a37f900601b9493bd142e8aed16205e5", "afb68ca8f452d1f4a674b457b5e30f59"}
if tei.get_ethosn_api_version() == 2102:
_compile_hash = {"7795b6c67178da9d1f9b98063bad75b1", "10826406ae724e52f360a06c35ced09d"}
if tei.get_ethosn_variant() == "Ethos-N78_1TOPS_2PLE_RATIO":
3 changes: 2 additions & 1 deletion tests/python/contrib/test_ethosn/test_split.py
@@ -37,8 +37,9 @@ def test_split(dtype):
trials = [
((1, 16, 16, 32), (2, 7, 10), 2),
((1, 12, 8, 16), 3, 1),
((1, 66), 11, 1),
]
if tei.get_ethosn_api_version() < 2111:
trials.append(((1, 66), 11, 1))

np.random.seed(0)
for shape, splits, axis in trials:
