diff --git a/tutorials/dev/bring_your_own_datatypes.py b/tutorials/dev/bring_your_own_datatypes.py
index 06d96e14d28cd..bf09af9f3abe3 100644
--- a/tutorials/dev/bring_your_own_datatypes.py
+++ b/tutorials/dev/bring_your_own_datatypes.py
@@ -262,9 +262,9 @@ def get_cat_image():
 ######################################################################
 # It's easy to execute MobileNet with native TVM:
 
-ex = tvm.relay.create_executor("graph", mod=module)
+ex = tvm.relay.create_executor("graph", mod=module, params=params)
 input = get_cat_image()
-result = ex.evaluate()(input, **params).numpy()
+result = ex.evaluate()(input).numpy()
 # print first 10 elements
 print(result.flatten()[:10])
 
diff --git a/tutorials/frontend/from_keras.py b/tutorials/frontend/from_keras.py
index 1c48aff799d42..db1b3660ae319 100644
--- a/tutorials/frontend/from_keras.py
+++ b/tutorials/frontend/from_keras.py
@@ -98,13 +98,13 @@
 target = "cuda"
 dev = tvm.cuda(0)
 with tvm.transform.PassContext(opt_level=3):
-    executor = relay.build_module.create_executor("graph", mod, dev, target)
+    executor = relay.build_module.create_executor("graph", mod, dev, target, params).evaluate()
 
 ######################################################################
 # Execute on TVM
 # ---------------
 dtype = "float32"
-tvm_out = executor.evaluate()(tvm.nd.array(data.astype(dtype)), **params)
+tvm_out = executor(tvm.nd.array(data.astype(dtype)))
 top1_tvm = np.argmax(tvm_out.numpy()[0])
 
 #####################################################################
diff --git a/tutorials/frontend/from_onnx.py b/tutorials/frontend/from_onnx.py
index 26aeb6ecaf387..fd51d7a769929 100644
--- a/tutorials/frontend/from_onnx.py
+++ b/tutorials/frontend/from_onnx.py
@@ -92,13 +92,15 @@
 mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
 
 with tvm.transform.PassContext(opt_level=1):
-    intrp = relay.build_module.create_executor("graph", mod, tvm.cpu(0), target)
+    executor = relay.build_module.create_executor(
+        "graph", mod, tvm.cpu(0), target, params
+    ).evaluate()
 
 ######################################################################
 # Execute on TVM
 # ---------------------------------------------
 dtype = "float32"
-tvm_output = intrp.evaluate()(tvm.nd.array(x.astype(dtype)), **params).numpy()
+tvm_output = executor(tvm.nd.array(x.astype(dtype))).numpy()
 
 ######################################################################
 # Display results
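
For reference, a minimal self-contained sketch of the call pattern these tutorials move to: parameters are bound when the executor is created, so the compiled function is called with the model inputs only. The toy add module, the "llvm" CPU target, and the variable names below are illustrative assumptions, not taken from the tutorials; only the create_executor(..., params).evaluate() usage mirrors the diff above.

import numpy as np
import tvm
from tvm import relay

# A tiny Relay module standing in for a frontend import: y = x + w
x = relay.var("x", shape=(1, 4), dtype="float32")
w = relay.var("w", shape=(1, 4), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x, w], x + w))
params = {"w": np.ones((1, 4), dtype="float32")}

# Params are bound at executor-creation time, so the compiled function
# takes only the remaining input "x" -- no **params at call time.
executor = relay.build_module.create_executor(
    "graph", mod, tvm.cpu(0), "llvm", params
).evaluate()
out = executor(tvm.nd.array(np.zeros((1, 4), dtype="float32"))).numpy()
print(out)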