[Tutorial][Frontend] move from_keras tutorial to frontend (#2479)
* [Tutorial][Frontend] move from_keras tutorial to frontend

* remove tutorial/nnvm/from_keras.py
Huyuwei authored and tqchen committed Feb 20, 2019
1 parent cc2b676 commit aaad5f9
Showing 1 changed file with 13 additions and 23 deletions.
36 changes: 13 additions & 23 deletions tutorials/nnvm/from_keras.py → tutorials/frontend/from_keras.py
@@ -3,7 +3,7 @@
=====================
**Author**: `Yuwei Hu <https://Huyuwei.github.io/>`_
-This article is an introductory tutorial to deploy keras models with NNVM.
+This article is an introductory tutorial to deploy keras models with Relay.
For us to begin with, keras should be installed.
Tensorflow is also required since it's used as the default backend of keras.
@@ -18,8 +18,8 @@
or please refer to official site
https://keras.io/#installation
"""
-import nnvm
import tvm
+import tvm.relay as relay
import keras
import numpy as np
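The docstring above notes that TensorFlow must be installed as the Keras backend. A minimal sanity check, not part of this commit and relying only on the standard keras.backend.backend() call, could be run before the rest of the script:

import keras
# Keras reports the name of its active backend; this tutorial expects TensorFlow.
assert keras.backend.backend() == 'tensorflow', 'please configure the tensorflow backend'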

@@ -66,32 +66,22 @@ def download(url, path, overwrite=False):
print('input_1', data.shape)

######################################################################
-# Compile the model on NNVM
-# --------------------------
-# We should be familiar with the process now.
-
-# convert the keras model(NHWC layout) to NNVM format(NCHW layout).
-sym, params = nnvm.frontend.from_keras(keras_resnet50)
+# Compile the model with Relay
+# ----------------------------
+# convert the keras model(NHWC layout) to Relay format(NCHW layout).
+shape_dict = {'input_1': data.shape}
+func, params = relay.frontend.from_keras(keras_resnet50, shape_dict)
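# (Illustrative sketch, not part of this diff.) The converter maps the NHWC
# Keras model to NCHW, so `data` above is expected to already be in NCHW; for
# a 224x224 RGB ResNet50 input that means shape (1, 3, 224, 224), and
# data.transpose([0, 2, 3, 1]) recovers the (1, 224, 224, 3) layout Keras uses.
example_nchw = np.zeros((1, 3, 224, 224), dtype='float32')
example_nhwc = example_nchw.transpose([0, 2, 3, 1])  # shape (1, 224, 224, 3)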
# compile the model
target = 'cuda'
-shape_dict = {'input_1': data.shape}
-with nnvm.compiler.build_config(opt_level=3):
-    graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)
+ctx = tvm.gpu(0)
+with relay.build_config(opt_level=3):
+    executor = relay.build_module.create_executor('graph', func, ctx, target)
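For readers coming from the removed NNVM flow, a hedged sketch of the closest Relay equivalent that still yields a deployable graph-runtime module, using relay.build and tvm.contrib.graph_runtime as they existed around this release (the names are assumptions; later TVM versions rename these APIs):

# Sketch only: compile to a graph/lib/params triple and drive it through the
# graph runtime, mirroring the deleted NNVM code path above.
from tvm.contrib import graph_runtime

with relay.build_config(opt_level=3):
    graph, lib, graph_params = relay.build(func, target, params=params)

m = graph_runtime.create(graph, lib, ctx)
m.set_input('input_1', tvm.nd.array(data.astype('float32')))
m.set_input(**graph_params)
m.run()
out_graph_runtime = m.get_output(0)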

######################################################################
# Execute on TVM
# ---------------
-# The process is no different from other examples.
-from tvm.contrib import graph_runtime
-ctx = tvm.gpu(0)
-m = graph_runtime.create(graph, lib, ctx)
-# set inputs
-m.set_input('input_1', tvm.nd.array(data.astype('float32')))
-m.set_input(**params)
-# execute
-m.run()
-# get outputs
-tvm_out = m.get_output(0)
+dtype = 'float32'
+tvm_out = executor.evaluate(func)(tvm.nd.array(data.astype(dtype)), **params)
top1_tvm = np.argmax(tvm_out.asnumpy()[0])
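A small follow-up sketch, not part of the commit: the same output vector can be ranked with np.argsort to inspect the top-5 predictions instead of only the arg-max class.

top5_tvm = np.argsort(tvm_out.asnumpy()[0])[-5:][::-1]
print('Relay top-5 ids: {}'.format(top5_tvm))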

#####################################################################
@@ -106,7 +96,7 @@ def download(url, path, overwrite=False):
download(synset_url, synset_name)
with open(synset_name) as f:
    synset = eval(f.read())
-print('NNVM top-1 id: {}, class name: {}'.format(top1_tvm, synset[top1_tvm]))
+print('Relay top-1 id: {}, class name: {}'.format(top1_tvm, synset[top1_tvm]))
# confirm correctness with keras output
keras_out = keras_resnet50.predict(data.transpose([0, 2, 3, 1]))
top1_keras = np.argmax(keras_out)
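A short closing sketch, not part of the commit, assuming the synset lookup above succeeded, to make the cross-check against Keras explicit:

print('Keras top-1 id: {}, class name: {}'.format(top1_keras, synset[top1_keras]))
assert top1_tvm == top1_keras, 'Relay and Keras disagree on the top-1 class'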
