Skip to content

Commit

Permalink
Merge pull request neo-ai#3 from trevor-m/fix-quant-flatten
Browse files Browse the repository at this point in the history
Fix tensor_quant_flatten for non-rank-3 inputs + use opt_level=0 for calibration
  • Loading branch information
jianzhong-xu authored Jun 9, 2020
2 parents 6245877 + 50db3ed commit 14ac1f4
Showing 1 changed file with 3 additions and 2 deletions.
5 changes: 3 additions & 2 deletions python/tvm/relay/backend/contrib/tidl.py
Original file line number Diff line number Diff line change
Expand Up @@ -965,7 +965,7 @@ def tensor_quant_flatten(input_tensor, data_layout):
# only use 1 batch for calibration
input_tensor = input_tensor[0,:]
# change layout to CxHxW to use numpy.flatten to change to a 1-d array
if data_layout == "NHWC":
if data_layout == "NHWC" and len(input_tensor.shape) == 3:
input_tensor = input_tensor.transpose(2,0,1)

if np.amin(input_tensor) >= 0:
Expand Down Expand Up @@ -1183,7 +1183,8 @@ def generate_subgraph_tensors(mod, params, input_node, input_data):
print("Input map:", my_mutator.name_map)

# Build and execute calibration graph to get outputs
with relay.build_config(opt_level=3):
# Use opt_level=0 to avoid optimizations which modify the module (could change original module)
with relay.build_config(opt_level=0):
graph, lib, params = relay.build(mod_tvm, "llvm", params=params)
mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
mod.set_input(input_node, input_data)
Expand Down

0 comments on commit 14ac1f4

Please sign in to comment.