diff --git a/tutorials/dev/bring_your_own_datatypes.py b/tutorials/dev/bring_your_own_datatypes.py
index a5e8e2898d39..1cf556ddd056 100644
--- a/tutorials/dev/bring_your_own_datatypes.py
+++ b/tutorials/dev/bring_your_own_datatypes.py
@@ -257,8 +257,9 @@ def get_cat_image():
 ######################################################################
 # It's easy to execute MobileNet with native TVM:
 
+ex = tvm.relay.create_executor("graph", mod=module, params=params)
 input = get_cat_image()
-result = tvm.relay.create_executor("graph", mod=module).evaluate()(input, **params).numpy()
+result = ex.evaluate()(input).numpy()
 # print first 10 elements
 print(result.flatten()[:10])
 
diff --git a/tutorials/frontend/from_keras.py b/tutorials/frontend/from_keras.py
index e62836d2ccfe..182e769b35b1 100644
--- a/tutorials/frontend/from_keras.py
+++ b/tutorials/frontend/from_keras.py
@@ -103,14 +103,14 @@
 # due to a latent bug. Note that the pass context only has an effect within
 # evaluate() and is not captured by create_executor().
 with tvm.transform.PassContext(opt_level=0):
-    model = relay.build_module.create_executor("graph", mod, dev, target).evaluate()
+    model = relay.build_module.create_executor("graph", mod, dev, target, params).evaluate()
 
 
 ######################################################################
 # Execute on TVM
 # ---------------
 dtype = "float32"
-tvm_out = model(tvm.nd.array(data.astype(dtype)), **params)
+tvm_out = model(tvm.nd.array(data.astype(dtype)))
 top1_tvm = np.argmax(tvm_out.numpy()[0])
 
 #####################################################################
diff --git a/tutorials/frontend/from_onnx.py b/tutorials/frontend/from_onnx.py
index 890bfbac4d8a..fd51d7a76992 100644
--- a/tutorials/frontend/from_onnx.py
+++ b/tutorials/frontend/from_onnx.py
@@ -92,13 +92,15 @@
 mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
 
 with tvm.transform.PassContext(opt_level=1):
-    compiled = relay.build_module.create_executor("graph", mod, tvm.cpu(0), target).evaluate()
+    executor = relay.build_module.create_executor(
+        "graph", mod, tvm.cpu(0), target, params
+    ).evaluate()
 
 ######################################################################
 # Execute on TVM
 # ---------------------------------------------
 dtype = "float32"
-tvm_output = compiled(tvm.nd.array(x.astype(dtype)), **params).numpy()
+tvm_output = executor(tvm.nd.array(x.astype(dtype))).numpy()
 
 ######################################################################
 # Display results
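
All three tutorials move to the same calling convention: `params` is bound when the executor is created, so the evaluated function is called with only the model input. Below is a minimal sketch of that pattern, not taken from any one tutorial; it assumes `mod` and `params` have already been produced by a Relay frontend importer (e.g. relay.frontend.from_onnx), that `x` is a NumPy array matching the model's input, and it uses an "llvm" CPU target. The names `run` and `x` are illustrative only.

import tvm
from tvm import relay

# Assumed available: `mod` and `params` from a frontend importer, and an
# input array `x` with the model's expected shape and dtype.
with tvm.transform.PassContext(opt_level=1):
    # Bind params at executor-creation time (the new convention) ...
    run = relay.build_module.create_executor(
        "graph", mod, tvm.cpu(0), "llvm", params
    ).evaluate()

# ... and pass only the input at call time, instead of run(input, **params).
out = run(tvm.nd.array(x.astype("float32"))).numpy()
print(out.flatten()[:10])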