From 4cc90acab6fb8869328a51a5046b22f2e0dd17b3 Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Fri, 3 Sep 2021 13:27:30 -0400 Subject: [PATCH 01/32] finish rpc and shape_dict in tut --- tutorials/get_started/tvmc_python.py | 218 +++++++++++++++++++++++++++ 1 file changed, 218 insertions(+) create mode 100644 tutorials/get_started/tvmc_python.py diff --git a/tutorials/get_started/tvmc_python.py b/tutorials/get_started/tvmc_python.py new file mode 100644 index 000000000000..a336f13fbf67 --- /dev/null +++ b/tutorials/get_started/tvmc_python.py @@ -0,0 +1,218 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Getting Starting using TVMC Python, the simplified tvm API +========================================== +**Authors**: +`Jocelyn Shiue `_, + +

Welcome to TVMC Python #TODO +Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 + +################################################################################ +# Step 0: Imports +# --------------- +# +# .. code-block:: python +# +# from tvm.driver import tvmc +# + +################################################################################ +# Step 1: Load a model +# -------------------- +# Let's import our model into tvmc. +# Let's import our model into tvmc. This step converts a machine learning model from +# a supported framework into tvm's high level graph representation language called relay. +# This is to have a unified starting point for all models in tvm. The frameworks we currently +# support are: Keras, Onnx, Tensorflow, TFLite, and Pytorch. +# +# .. code-block:: python +# model = tvmc.load('my_model.onnx') #Step 1: Load +# +# If you'd like to see the relay, you can run: +# ``model.summary()`` +# + +################################################################################ +# Step 2: Compile +# ---------------- +# Now that our model is in relay, our next step is to compile it to a desired +# hardware to run on. We refer to this hardware as a target. This compilation process +# translates the model from relay into a lower-level language that the +# target machine can understand. +# +# In order to compile a model a tvm.target string is required. +# To learn more about tvm.targets and their options look at the `documentation `_. +# Some examples include: +# 1. cuda (nvidia gpu) +# 2. llvm (cpu) +# 3. llvm -mcpu=cascadelake (intel cpu) +# +# .. code-block:: python +# package = tvmc.compile(model, target="llvm") #Step 2: Compile +# +# The compilation step returns a package. +# + +################################################################################ +# Step 3: Run +# ----------- +# The compiled package can now be run on the hardware target. The device +# input options are: cpu, cuda, cl, metal, and vulkan. +# +# .. code-block:: python +# result = tvmc.run(package, device="cpu") #Step 3: Run +# +# And you can print the results: +# ``print(results)`` +# + +################################################################################ +# Step 1.5: Tune [Optional & Recommended] +# --------------------------------------- +# Run speed can further be improved by tuning. This optional step uses +# machine learning to look at each operation within a model (a function) and +# tries to find a faster way to run it. We do this through a cost model, and +# bench marking possible schedules. +# +# The target is the same as compile. +# +# .. code-block:: python +# tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune +# +# The terminal output should look like: +# [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s +# [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s +# ..... +# +# There may be UserWarnings that can be ignored. +# This should make the end result faster, but it can take hours to tune. +# + +################################################################################ +# Save and then start the process in the terminal: +# ------------------------------------------------ +# +# .. code-block:: python +# python my_tvmc_script.py +# +# Note: Your fans may become very active +# + +################################################################################ +# Example results: +# ---------------- +# +# .. 
code-block:: python
+#    Time elapsed for training: 18.99 s
+#    Execution time summary:
+#      mean (ms)    max (ms)    min (ms)    std (ms)
+#        25.24        26.12       24.89       0.38
+#
+# Output Names:
+# ['output_0']
+#
+
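+################################################################################
+# Putting the steps together:
+# ---------------------------
+#
+# As a quick recap, a minimal end-to-end script built from the steps above could
+# look like the sketch below. 'my_model.onnx' is just the placeholder file name
+# from Step 1, and the tuning line is optional; the tuning records it returns can
+# be passed into compile so the tuned schedules are actually used:
+#
+# .. code-block:: python
+#   from tvm.driver import tvmc
+#
+#   model = tvmc.load('my_model.onnx')                 #Step 1: Load
+#   tuning_records = tvmc.tune(model, target="llvm")   #Step 1.5: Optional Tune
+#   package = tvmc.compile(model, target="llvm", tuning_records=tuning_records)  #Step 2: Compile
+#   result = tvmc.run(package, device="cpu")           #Step 3: Run
+#   print(result)
+#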

Additional TVMC Functionalities #TODO + +################################################################################ +# Saving the model +# ---------------- +# +# To make things faster for later, after loading the model (Step 1) save the relay version. +# +# .. code-block:: python +# model = tvmc.load('my_model.onnx') #Step 1: Load +# model.save(model_path) +# + +################################################################################ +# Saving the package +# ------------------ +# +# After the model has been compiled (Step 2) the package also is also saveable. +# +# .. code-block:: python +# tvmc.compile(model, target="llvm", package_path="whatever") #Step 2: Compile +# +# new_package = tvmc.TVMCPackage(package_path="whatever") +# result = tvmc.run(new_package) #Step 3: Run +# + +################################################################################ +# Using Autoscheduler +# ------------------- +# Use the next generation of tvm to enable potentially faster run speed results. +# The search space of the schedules is automatically generated unlike +# previously where they needed to be hand written. (Learn more: 1, 2) +# +# .. code-block:: python +# tvmc.tune(model, target="llvm", enable_autoscheduler = True) #Step 1.5: Optional Tune +# + +################################################################################ +# Saving the tuning results +# ------------------------- +# +# The tuning results can be saved in a file for later reuse. +# +# Method 1: +# .. code-block:: python +# log_file = "hello.json" +# +# # Run tuning +# tvmc.tune(model, target="llvm",tuning_records=log_file) +# +# ... +# +# # Later run tuning and reuse tuning results +# tvmc.tune(model, target="llvm",tuning_records=log_file) +# +# Method 2: +# .. code-block:: python +# # Run tuning +# tuning_records = tvmc.tune(model, target="llvm") +# +# ... +# +# # Later run tuning and reuse tuning results +# tvmc.tune(model, target="llvm",tuning_records=tuning_records) +# + +################################################################################ +# Using an RPC Server: +# +# +# This thing needs some love +# +# +# +# +# +# + +################################################################################ +# Tuning a more complex model: +# ---------------------------- +# If you notice T's (timeouts) printed, increase the searching time frame: +# +# .. code-block:: python +# tvmc.tune(model,trials=10000,timeout=10,) +# + +""" \ No newline at end of file From 60d213109746a2c0505f5df4d25777aa71f54e59 Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Mon, 13 Sep 2021 17:30:20 -0400 Subject: [PATCH 02/32] added more to rpc --- tutorials/get_started/tvmc_python.py | 40 +++++++++++++++++++--------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/tutorials/get_started/tvmc_python.py b/tutorials/get_started/tvmc_python.py index a336f13fbf67..8c0907ff3145 100644 --- a/tutorials/get_started/tvmc_python.py +++ b/tutorials/get_started/tvmc_python.py @@ -47,6 +47,17 @@ # If you'd like to see the relay, you can run: # ``model.summary()`` # +# All frameworks support over writing the input shapes with a shape_dict argument. +# For most frameworks this is optional but for Pytorch this is necessary. +# +# .. 
code-block:: python +# ### Step 1: Load shape_dict Style +# # shape_dict = {'model_input_name1': [1, 3, 224, 224], 'input2': [1, 2, 3, 4], ...} #example format with random numbers +# # model = tvmc.load(model_path, shape_dict=shape_dict) #Step 1: Load + shape_dict +# +# One way to see the model's input/shape_dict is via `netron `_, . After opening the model, +# click the first node to see the name(s) and shape(s) in the inputs section. + ################################################################################ # Step 2: Compile @@ -194,18 +205,6 @@ # tvmc.tune(model, target="llvm",tuning_records=tuning_records) # -################################################################################ -# Using an RPC Server: -# -# -# This thing needs some love -# -# -# -# -# -# - ################################################################################ # Tuning a more complex model: # ---------------------------- @@ -215,4 +214,21 @@ # tvmc.tune(model,trials=10000,timeout=10,) # +################################################################################ +# Compiling a model for a remote device: +# +# A remote procedural call is useful when you would like to compile for hardware +# that is not on your local machine. The tvmc methods support this. +# To set up the RPC server take a look at the 'Set up RPC Server on Device' +# section in this `document `_. +# +# Within the TVMC Script include the following and adjust accordingly: +# +# .. code-block:: python +# tvmc.tune(model,trials=10000,timeout=10,) +# tvmc.tune(model,trials=10000,timeout=10,) +# tvmc.tune(model,trials=10000,timeout=10,) +# tvmc.tune(model,trials=10000,timeout=10,) +# + """ \ No newline at end of file From 0ea76cfdf2fa96343f8a9e9fac91bf765364707a Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Wed, 15 Sep 2021 12:09:17 -0400 Subject: [PATCH 03/32] tutorial edits --- tutorials/get_started/tvmc_python.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tutorials/get_started/tvmc_python.py b/tutorials/get_started/tvmc_python.py index 8c0907ff3145..1324bcaa9be4 100644 --- a/tutorials/get_started/tvmc_python.py +++ b/tutorials/get_started/tvmc_python.py @@ -16,11 +16,12 @@ # under the License. """ Getting Starting using TVMC Python, the simplified tvm API -========================================== +========================================================== **Authors**: `Jocelyn Shiue `_, -

Welcome to TVMC Python #TODO +Welcome to TVMC Python +====================== Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 ################################################################################ @@ -139,7 +140,8 @@ # ['output_0'] # -

Additional TVMC Functionalities #TODO +Additional TVMC Functionalities +=============================== ################################################################################ # Saving the model From ce6e8a6c86feef4bd6c33b7eb70d2bda983df1d8 Mon Sep 17 00:00:00 2001 From: jshiue Date: Wed, 1 Dec 2021 21:04:40 -0500 Subject: [PATCH 04/32] added tutorial to docs in howto --- docs/how_to/index.rst | 1 + gallery/how_to/use_tvms_python_api/README.txt | 2 + .../how_to/use_tvms_python_api/tvmc_python.py | 236 ++++++++++++++++++ 3 files changed, 239 insertions(+) create mode 100644 gallery/how_to/use_tvms_python_api/README.txt create mode 100644 gallery/how_to/use_tvms_python_api/tvmc_python.py diff --git a/docs/how_to/index.rst b/docs/how_to/index.rst index 433d7acee95a..43475bb1f0da 100644 --- a/docs/how_to/index.rst +++ b/docs/how_to/index.rst @@ -26,6 +26,7 @@ schedule with tesor expressions?" :maxdepth: 1 compile_models/index + use_tvms_python_api/index deploy/index work_with_relay/index work_with_schedules/index diff --git a/gallery/how_to/use_tvms_python_api/README.txt b/gallery/how_to/use_tvms_python_api/README.txt new file mode 100644 index 000000000000..929a011683da --- /dev/null +++ b/gallery/how_to/use_tvms_python_api/README.txt @@ -0,0 +1,2 @@ +Use TVM's Python Scripting API +------------------------------ \ No newline at end of file diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py new file mode 100644 index 000000000000..1324bcaa9be4 --- /dev/null +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -0,0 +1,236 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Getting Starting using TVMC Python, the simplified tvm API +========================================================== +**Authors**: +`Jocelyn Shiue `_, + +Welcome to TVMC Python +====================== +Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 + +################################################################################ +# Step 0: Imports +# --------------- +# +# .. code-block:: python +# +# from tvm.driver import tvmc +# + +################################################################################ +# Step 1: Load a model +# -------------------- +# Let's import our model into tvmc. +# Let's import our model into tvmc. This step converts a machine learning model from +# a supported framework into tvm's high level graph representation language called relay. +# This is to have a unified starting point for all models in tvm. The frameworks we currently +# support are: Keras, Onnx, Tensorflow, TFLite, and Pytorch. +# +# .. 
code-block:: python +# model = tvmc.load('my_model.onnx') #Step 1: Load +# +# If you'd like to see the relay, you can run: +# ``model.summary()`` +# +# All frameworks support over writing the input shapes with a shape_dict argument. +# For most frameworks this is optional but for Pytorch this is necessary. +# +# .. code-block:: python +# ### Step 1: Load shape_dict Style +# # shape_dict = {'model_input_name1': [1, 3, 224, 224], 'input2': [1, 2, 3, 4], ...} #example format with random numbers +# # model = tvmc.load(model_path, shape_dict=shape_dict) #Step 1: Load + shape_dict +# +# One way to see the model's input/shape_dict is via `netron `_, . After opening the model, +# click the first node to see the name(s) and shape(s) in the inputs section. + + +################################################################################ +# Step 2: Compile +# ---------------- +# Now that our model is in relay, our next step is to compile it to a desired +# hardware to run on. We refer to this hardware as a target. This compilation process +# translates the model from relay into a lower-level language that the +# target machine can understand. +# +# In order to compile a model a tvm.target string is required. +# To learn more about tvm.targets and their options look at the `documentation `_. +# Some examples include: +# 1. cuda (nvidia gpu) +# 2. llvm (cpu) +# 3. llvm -mcpu=cascadelake (intel cpu) +# +# .. code-block:: python +# package = tvmc.compile(model, target="llvm") #Step 2: Compile +# +# The compilation step returns a package. +# + +################################################################################ +# Step 3: Run +# ----------- +# The compiled package can now be run on the hardware target. The device +# input options are: cpu, cuda, cl, metal, and vulkan. +# +# .. code-block:: python +# result = tvmc.run(package, device="cpu") #Step 3: Run +# +# And you can print the results: +# ``print(results)`` +# + +################################################################################ +# Step 1.5: Tune [Optional & Recommended] +# --------------------------------------- +# Run speed can further be improved by tuning. This optional step uses +# machine learning to look at each operation within a model (a function) and +# tries to find a faster way to run it. We do this through a cost model, and +# bench marking possible schedules. +# +# The target is the same as compile. +# +# .. code-block:: python +# tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune +# +# The terminal output should look like: +# [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s +# [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s +# ..... +# +# There may be UserWarnings that can be ignored. +# This should make the end result faster, but it can take hours to tune. +# + +################################################################################ +# Save and then start the process in the terminal: +# ------------------------------------------------ +# +# .. code-block:: python +# python my_tvmc_script.py +# +# Note: Your fans may become very active +# + +################################################################################ +# Example results: +# ---------------- +# +# .. 
code-block:: python +# Time elapsed for training: 18.99 s +# Execution time summary: +# mean (ms) max (ms) min (ms) std (ms) +# 25.24 26.12 24.89 0.38 +# +# Output Names: +# ['output_0'] +# + +Additional TVMC Functionalities +=============================== + +################################################################################ +# Saving the model +# ---------------- +# +# To make things faster for later, after loading the model (Step 1) save the relay version. +# +# .. code-block:: python +# model = tvmc.load('my_model.onnx') #Step 1: Load +# model.save(model_path) +# + +################################################################################ +# Saving the package +# ------------------ +# +# After the model has been compiled (Step 2) the package also is also saveable. +# +# .. code-block:: python +# tvmc.compile(model, target="llvm", package_path="whatever") #Step 2: Compile +# +# new_package = tvmc.TVMCPackage(package_path="whatever") +# result = tvmc.run(new_package) #Step 3: Run +# + +################################################################################ +# Using Autoscheduler +# ------------------- +# Use the next generation of tvm to enable potentially faster run speed results. +# The search space of the schedules is automatically generated unlike +# previously where they needed to be hand written. (Learn more: 1, 2) +# +# .. code-block:: python +# tvmc.tune(model, target="llvm", enable_autoscheduler = True) #Step 1.5: Optional Tune +# + +################################################################################ +# Saving the tuning results +# ------------------------- +# +# The tuning results can be saved in a file for later reuse. +# +# Method 1: +# .. code-block:: python +# log_file = "hello.json" +# +# # Run tuning +# tvmc.tune(model, target="llvm",tuning_records=log_file) +# +# ... +# +# # Later run tuning and reuse tuning results +# tvmc.tune(model, target="llvm",tuning_records=log_file) +# +# Method 2: +# .. code-block:: python +# # Run tuning +# tuning_records = tvmc.tune(model, target="llvm") +# +# ... +# +# # Later run tuning and reuse tuning results +# tvmc.tune(model, target="llvm",tuning_records=tuning_records) +# + +################################################################################ +# Tuning a more complex model: +# ---------------------------- +# If you notice T's (timeouts) printed, increase the searching time frame: +# +# .. code-block:: python +# tvmc.tune(model,trials=10000,timeout=10,) +# + +################################################################################ +# Compiling a model for a remote device: +# +# A remote procedural call is useful when you would like to compile for hardware +# that is not on your local machine. The tvmc methods support this. +# To set up the RPC server take a look at the 'Set up RPC Server on Device' +# section in this `document `_. +# +# Within the TVMC Script include the following and adjust accordingly: +# +# .. 
code-block:: python +# tvmc.tune(model,trials=10000,timeout=10,) +# tvmc.tune(model,trials=10000,timeout=10,) +# tvmc.tune(model,trials=10000,timeout=10,) +# tvmc.tune(model,trials=10000,timeout=10,) +# + +""" \ No newline at end of file From b3ff2c990400314527f56ae579c6b733622246dc Mon Sep 17 00:00:00 2001 From: jshiue Date: Wed, 1 Dec 2021 21:20:20 -0500 Subject: [PATCH 05/32] accidentally had two copies of tutorial --- tutorials/get_started/tvmc_python.py | 236 --------------------------- 1 file changed, 236 deletions(-) delete mode 100644 tutorials/get_started/tvmc_python.py diff --git a/tutorials/get_started/tvmc_python.py b/tutorials/get_started/tvmc_python.py deleted file mode 100644 index 1324bcaa9be4..000000000000 --- a/tutorials/get_started/tvmc_python.py +++ /dev/null @@ -1,236 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -""" -Getting Starting using TVMC Python, the simplified tvm API -========================================================== -**Authors**: -`Jocelyn Shiue `_, - -Welcome to TVMC Python -====================== -Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 - -################################################################################ -# Step 0: Imports -# --------------- -# -# .. code-block:: python -# -# from tvm.driver import tvmc -# - -################################################################################ -# Step 1: Load a model -# -------------------- -# Let's import our model into tvmc. -# Let's import our model into tvmc. This step converts a machine learning model from -# a supported framework into tvm's high level graph representation language called relay. -# This is to have a unified starting point for all models in tvm. The frameworks we currently -# support are: Keras, Onnx, Tensorflow, TFLite, and Pytorch. -# -# .. code-block:: python -# model = tvmc.load('my_model.onnx') #Step 1: Load -# -# If you'd like to see the relay, you can run: -# ``model.summary()`` -# -# All frameworks support over writing the input shapes with a shape_dict argument. -# For most frameworks this is optional but for Pytorch this is necessary. -# -# .. code-block:: python -# ### Step 1: Load shape_dict Style -# # shape_dict = {'model_input_name1': [1, 3, 224, 224], 'input2': [1, 2, 3, 4], ...} #example format with random numbers -# # model = tvmc.load(model_path, shape_dict=shape_dict) #Step 1: Load + shape_dict -# -# One way to see the model's input/shape_dict is via `netron `_, . After opening the model, -# click the first node to see the name(s) and shape(s) in the inputs section. 
- - -################################################################################ -# Step 2: Compile -# ---------------- -# Now that our model is in relay, our next step is to compile it to a desired -# hardware to run on. We refer to this hardware as a target. This compilation process -# translates the model from relay into a lower-level language that the -# target machine can understand. -# -# In order to compile a model a tvm.target string is required. -# To learn more about tvm.targets and their options look at the `documentation `_. -# Some examples include: -# 1. cuda (nvidia gpu) -# 2. llvm (cpu) -# 3. llvm -mcpu=cascadelake (intel cpu) -# -# .. code-block:: python -# package = tvmc.compile(model, target="llvm") #Step 2: Compile -# -# The compilation step returns a package. -# - -################################################################################ -# Step 3: Run -# ----------- -# The compiled package can now be run on the hardware target. The device -# input options are: cpu, cuda, cl, metal, and vulkan. -# -# .. code-block:: python -# result = tvmc.run(package, device="cpu") #Step 3: Run -# -# And you can print the results: -# ``print(results)`` -# - -################################################################################ -# Step 1.5: Tune [Optional & Recommended] -# --------------------------------------- -# Run speed can further be improved by tuning. This optional step uses -# machine learning to look at each operation within a model (a function) and -# tries to find a faster way to run it. We do this through a cost model, and -# bench marking possible schedules. -# -# The target is the same as compile. -# -# .. code-block:: python -# tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune -# -# The terminal output should look like: -# [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s -# [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s -# ..... -# -# There may be UserWarnings that can be ignored. -# This should make the end result faster, but it can take hours to tune. -# - -################################################################################ -# Save and then start the process in the terminal: -# ------------------------------------------------ -# -# .. code-block:: python -# python my_tvmc_script.py -# -# Note: Your fans may become very active -# - -################################################################################ -# Example results: -# ---------------- -# -# .. code-block:: python -# Time elapsed for training: 18.99 s -# Execution time summary: -# mean (ms) max (ms) min (ms) std (ms) -# 25.24 26.12 24.89 0.38 -# -# Output Names: -# ['output_0'] -# - -Additional TVMC Functionalities -=============================== - -################################################################################ -# Saving the model -# ---------------- -# -# To make things faster for later, after loading the model (Step 1) save the relay version. -# -# .. code-block:: python -# model = tvmc.load('my_model.onnx') #Step 1: Load -# model.save(model_path) -# - -################################################################################ -# Saving the package -# ------------------ -# -# After the model has been compiled (Step 2) the package also is also saveable. -# -# .. 
code-block:: python -# tvmc.compile(model, target="llvm", package_path="whatever") #Step 2: Compile -# -# new_package = tvmc.TVMCPackage(package_path="whatever") -# result = tvmc.run(new_package) #Step 3: Run -# - -################################################################################ -# Using Autoscheduler -# ------------------- -# Use the next generation of tvm to enable potentially faster run speed results. -# The search space of the schedules is automatically generated unlike -# previously where they needed to be hand written. (Learn more: 1, 2) -# -# .. code-block:: python -# tvmc.tune(model, target="llvm", enable_autoscheduler = True) #Step 1.5: Optional Tune -# - -################################################################################ -# Saving the tuning results -# ------------------------- -# -# The tuning results can be saved in a file for later reuse. -# -# Method 1: -# .. code-block:: python -# log_file = "hello.json" -# -# # Run tuning -# tvmc.tune(model, target="llvm",tuning_records=log_file) -# -# ... -# -# # Later run tuning and reuse tuning results -# tvmc.tune(model, target="llvm",tuning_records=log_file) -# -# Method 2: -# .. code-block:: python -# # Run tuning -# tuning_records = tvmc.tune(model, target="llvm") -# -# ... -# -# # Later run tuning and reuse tuning results -# tvmc.tune(model, target="llvm",tuning_records=tuning_records) -# - -################################################################################ -# Tuning a more complex model: -# ---------------------------- -# If you notice T's (timeouts) printed, increase the searching time frame: -# -# .. code-block:: python -# tvmc.tune(model,trials=10000,timeout=10,) -# - -################################################################################ -# Compiling a model for a remote device: -# -# A remote procedural call is useful when you would like to compile for hardware -# that is not on your local machine. The tvmc methods support this. -# To set up the RPC server take a look at the 'Set up RPC Server on Device' -# section in this `document `_. -# -# Within the TVMC Script include the following and adjust accordingly: -# -# .. 
code-block:: python -# tvmc.tune(model,trials=10000,timeout=10,) -# tvmc.tune(model,trials=10000,timeout=10,) -# tvmc.tune(model,trials=10000,timeout=10,) -# tvmc.tune(model,trials=10000,timeout=10,) -# - -""" \ No newline at end of file From 00a0fdab1fa19ce0ce8a9835eef7bd318f1d49b9 Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Thu, 2 Dec 2021 16:24:08 -0500 Subject: [PATCH 06/32] Update gallery/how_to/use_tvms_python_api/tvmc_python.py Co-authored-by: Leandro Nunes --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 1324bcaa9be4..242a69c3b378 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -17,7 +17,7 @@ """ Getting Starting using TVMC Python, the simplified tvm API ========================================================== -**Authors**: +**Author**: `Jocelyn Shiue `_, Welcome to TVMC Python From d83b579fc41372d9488470db5b8c17964fda57c5 Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Thu, 2 Dec 2021 16:24:23 -0500 Subject: [PATCH 07/32] Update gallery/how_to/use_tvms_python_api/tvmc_python.py Co-authored-by: Leandro Nunes --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 242a69c3b378..641d37d5ec38 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -36,7 +36,7 @@ ################################################################################ # Step 1: Load a model # -------------------- -# Let's import our model into tvmc. + # Let's import our model into tvmc. This step converts a machine learning model from # a supported framework into tvm's high level graph representation language called relay. # This is to have a unified starting point for all models in tvm. The frameworks we currently From 9bf946fad0826d77fa0ca07fa5c947f157463637 Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Thu, 2 Dec 2021 16:24:30 -0500 Subject: [PATCH 08/32] Update gallery/how_to/use_tvms_python_api/tvmc_python.py Co-authored-by: Leandro Nunes --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 641d37d5ec38..81e40c7e6120 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -38,7 +38,7 @@ # -------------------- # Let's import our model into tvmc. This step converts a machine learning model from -# a supported framework into tvm's high level graph representation language called relay. +# a supported framework into TVM's high level graph representation language called relay. # This is to have a unified starting point for all models in tvm. The frameworks we currently # support are: Keras, Onnx, Tensorflow, TFLite, and Pytorch. 
# From 71e51214cc832b1ecb89d47f6d94e17c97b38e24 Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Thu, 2 Dec 2021 16:24:35 -0500 Subject: [PATCH 09/32] Update gallery/how_to/use_tvms_python_api/tvmc_python.py Co-authored-by: Leandro Nunes --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 81e40c7e6120..548151650321 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -40,7 +40,7 @@ # Let's import our model into tvmc. This step converts a machine learning model from # a supported framework into TVM's high level graph representation language called relay. # This is to have a unified starting point for all models in tvm. The frameworks we currently -# support are: Keras, Onnx, Tensorflow, TFLite, and Pytorch. +# support are: Keras, ONNX, Tensorflow, TFLite, and PyTorch. # # .. code-block:: python # model = tvmc.load('my_model.onnx') #Step 1: Load From 6c4642c68e063b907679b56ca3c489ede5018b66 Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Thu, 2 Dec 2021 17:15:26 -0500 Subject: [PATCH 10/32] Apply suggestions from code review Co-authored-by: Leandro Nunes --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 548151650321..fef3e99dd446 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. """ -Getting Starting using TVMC Python, the simplified tvm API +Getting Starting using TVMC Python: a high-level API for TVM ========================================================== **Author**: `Jocelyn Shiue `_, @@ -71,9 +71,9 @@ # In order to compile a model a tvm.target string is required. # To learn more about tvm.targets and their options look at the `documentation `_. # Some examples include: -# 1. cuda (nvidia gpu) -# 2. llvm (cpu) -# 3. llvm -mcpu=cascadelake (intel cpu) +# 1. cuda (Nvidia GPU) +# 2. llvm (CPU) +# 3. llvm -mcpu=cascadelake (Intel CPU) # # .. code-block:: python # package = tvmc.compile(model, target="llvm") #Step 2: Compile @@ -100,7 +100,7 @@ # Run speed can further be improved by tuning. This optional step uses # machine learning to look at each operation within a model (a function) and # tries to find a faster way to run it. We do this through a cost model, and -# bench marking possible schedules. +# benchmarking possible schedules. # # The target is the same as compile. # @@ -219,7 +219,7 @@ ################################################################################ # Compiling a model for a remote device: # -# A remote procedural call is useful when you would like to compile for hardware +# A remote procedural call (RPC) is useful when you would like to compile for hardware # that is not on your local machine. The tvmc methods support this. # To set up the RPC server take a look at the 'Set up RPC Server on Device' # section in this `document `_. 
From 871da11d9a39b6b0c43ba1136af7980e432595de Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Thu, 2 Dec 2021 17:18:52 -0500 Subject: [PATCH 11/32] Update gallery/how_to/use_tvms_python_api/tvmc_python.py Co-authored-by: Leandro Nunes --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index fef3e99dd446..e967c7255bf5 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -175,7 +175,7 @@ # previously where they needed to be hand written. (Learn more: 1, 2) # # .. code-block:: python -# tvmc.tune(model, target="llvm", enable_autoscheduler = True) #Step 1.5: Optional Tune +# tvmc.tune(model, target="llvm", enable_autoscheduler = True) # ################################################################################ From 0eaf8583af3f6e0cbc0bbc7bf678fe102137cf25 Mon Sep 17 00:00:00 2001 From: CircleSpin Date: Thu, 2 Dec 2021 17:18:56 -0500 Subject: [PATCH 12/32] Update gallery/how_to/use_tvms_python_api/tvmc_python.py Co-authored-by: Leandro Nunes --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index e967c7255bf5..0a5227a250b0 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -161,7 +161,7 @@ # After the model has been compiled (Step 2) the package also is also saveable. # # .. code-block:: python -# tvmc.compile(model, target="llvm", package_path="whatever") #Step 2: Compile +# tvmc.compile(model, target="llvm", package_path="whatever") # # new_package = tvmc.TVMCPackage(package_path="whatever") # result = tvmc.run(new_package) #Step 3: Run From 26135aca99a4b5efcc408433d7a9fabc2b7c9747 Mon Sep 17 00:00:00 2001 From: jshiue Date: Thu, 2 Dec 2021 22:18:22 -0500 Subject: [PATCH 13/32] added Leandro's suggestions --- .../how_to/use_tvms_python_api/tvmc_python.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 0a5227a250b0..c18572d2cc0a 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -24,6 +24,9 @@ ====================== Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 +An example model to use:""" +wget https://github.com/onnx/models/raw/master/vision/classification/resnet/model/resnet50-v2-7.onnx +""" ################################################################################ # Step 0: Imports # --------------- @@ -36,7 +39,7 @@ ################################################################################ # Step 1: Load a model # -------------------- - +# # Let's import our model into tvmc. This step converts a machine learning model from # a supported framework into TVM's high level graph representation language called relay. # This is to have a unified starting point for all models in tvm. The frameworks we currently @@ -85,7 +88,7 @@ # Step 3: Run # ----------- # The compiled package can now be run on the hardware target. The device -# input options are: cpu, cuda, cl, metal, and vulkan. +# input options are: CPU, Cuda, CL, Metal, and Vulkan. # # .. 
code-block:: python # result = tvmc.run(package, device="cpu") #Step 3: Run @@ -148,10 +151,11 @@ # ---------------- # # To make things faster for later, after loading the model (Step 1) save the relay version. -# +# The model will then appear where you saved it for later in the coverted syntax. +# # .. code-block:: python # model = tvmc.load('my_model.onnx') #Step 1: Load -# model.save(model_path) +# model.save(desired_model_path) # ################################################################################ @@ -210,7 +214,9 @@ ################################################################################ # Tuning a more complex model: # ---------------------------- -# If you notice T's (timeouts) printed, increase the searching time frame: +# If you notice T's (timeouts) printed like below, +# .........T.T..T..T..T.T.T.T.T.T. +# increase the searching time frame: # # .. code-block:: python # tvmc.tune(model,trials=10000,timeout=10,) From e6213272e3c3bc94d8dd622533d297a34f424540 Mon Sep 17 00:00:00 2001 From: jshiue Date: Thu, 2 Dec 2021 23:10:19 -0500 Subject: [PATCH 14/32] added example model at top --- .../how_to/use_tvms_python_api/tvmc_python.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index c18572d2cc0a..ec2e2ecb2543 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -24,9 +24,17 @@ ====================== Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 -An example model to use:""" -wget https://github.com/onnx/models/raw/master/vision/classification/resnet/model/resnet50-v2-7.onnx -""" +If you don't have an example model, you can download one via the terminal: + + .. code-block:: bash + + mkdir myscripts + cd myscripts + wget https://github.com/onnx/models/raw/master/vision/classification/resnet/model/resnet50-v2-7.onnx + mv resnet50-v2-7.onnx my_model.onnx + touch tvmcpythonintro.py + + ################################################################################ # Step 0: Imports # --------------- @@ -239,4 +247,4 @@ # tvmc.tune(model,trials=10000,timeout=10,) # -""" \ No newline at end of file +""" From a92f9ed0beef8e5654b9adecba4c4778a95c054d Mon Sep 17 00:00:00 2001 From: jshiue Date: Thu, 2 Dec 2021 23:16:01 -0500 Subject: [PATCH 15/32] added example model, blacked it --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index ec2e2ecb2543..e0b0cd843074 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -24,7 +24,8 @@ ====================== Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 -If you don't have an example model, you can download one via the terminal: +Before we get started let's get an example model if you don't already have one. +Follow the steps to download a resnet model via the terminal: .. code-block:: bash @@ -34,6 +35,7 @@ mv resnet50-v2-7.onnx my_model.onnx touch tvmcpythonintro.py +Let's start editing the python file in your favorite text editor. 
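+
+As a rough preview of where the script is heading, the finished file boils down
+to a handful of tvmc calls (a sketch only; my_model.onnx is the file downloaded
+above, and llvm/cpu are just example target and device choices):
+
+ .. code-block:: python
+
+  from tvm.driver import tvmc
+
+  model = tvmc.load("my_model.onnx")            # import the ONNX model into Relay
+  package = tvmc.compile(model, target="llvm")  # compile for a CPU target
+  result = tvmc.run(package, device="cpu")      # run the compiled package
+  print(result)
+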
################################################################################ # Step 0: Imports From 312328035e991c831d3e80df35afb7d7a106c7bc Mon Sep 17 00:00:00 2001 From: jshiue Date: Fri, 3 Dec 2021 01:41:36 -0500 Subject: [PATCH 16/32] trying to get docs to build --- docs/conf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/conf.py b/docs/conf.py index e74df6cf1e0e..a8fd18624447 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -220,12 +220,14 @@ def git_describe_version(original_version): tvm_path.joinpath("gallery", "how_to", "tune_with_autoscheduler"), tvm_path.joinpath("gallery", "how_to", "work_with_microtvm"), tvm_path.joinpath("gallery", "how_to", "extend_tvm"), + tvm_path.joinpath("gallery", "how_to", "use_tvms_python_api"), tvm_path.joinpath("vta", "tutorials"), ] gallery_dirs = [ "tutorial", "how_to/compile_models", + "how_to/use_tvms_python_api", "how_to/deploy_models", "how_to/work_with_relay", "how_to/work_with_schedules", From 0df1e56af98c6e87e55ccdc2d8e18f1c681ff43c Mon Sep 17 00:00:00 2001 From: jshiue Date: Fri, 3 Dec 2021 14:04:37 -0500 Subject: [PATCH 17/32] underline too short for title --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index e0b0cd843074..9eaa2701a6a4 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -16,7 +16,7 @@ # under the License. """ Getting Starting using TVMC Python: a high-level API for TVM -========================================================== +============================================================= **Author**: `Jocelyn Shiue `_, From f14f400157c370dd0e93e472b5c5667fb9ad9bd4 Mon Sep 17 00:00:00 2001 From: jshiue Date: Mon, 13 Dec 2021 21:16:23 -0500 Subject: [PATCH 18/32] forgot Jetson info, added Chris H comments --- .../how_to/use_tvms_python_api/tvmc_python.py | 33 ++++++++++++------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 9eaa2701a6a4..dc97676bb65f 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -51,34 +51,35 @@ # -------------------- # # Let's import our model into tvmc. This step converts a machine learning model from -# a supported framework into TVM's high level graph representation language called relay. +# a supported framework into TVM's high level graph representation language called Relay. # This is to have a unified starting point for all models in tvm. The frameworks we currently # support are: Keras, ONNX, Tensorflow, TFLite, and PyTorch. # # .. code-block:: python # model = tvmc.load('my_model.onnx') #Step 1: Load # -# If you'd like to see the relay, you can run: +# If you'd like to see the Relay, you can run: # ``model.summary()`` # -# All frameworks support over writing the input shapes with a shape_dict argument. -# For most frameworks this is optional but for Pytorch this is necessary. +# All frameworks support overwriting the input shapes with a shape_dict argument. +# For most frameworks this is optional, but for Pytorch this is necessary as +# TVM cannot automatically search for it. # # .. 
code-block:: python # ### Step 1: Load shape_dict Style # # shape_dict = {'model_input_name1': [1, 3, 224, 224], 'input2': [1, 2, 3, 4], ...} #example format with random numbers # # model = tvmc.load(model_path, shape_dict=shape_dict) #Step 1: Load + shape_dict # -# One way to see the model's input/shape_dict is via `netron `_, . After opening the model, +# A suggested way to see the model's input/shape_dict is via `netron `_, . After opening the model, # click the first node to see the name(s) and shape(s) in the inputs section. ################################################################################ # Step 2: Compile # ---------------- -# Now that our model is in relay, our next step is to compile it to a desired +# Now that our model is in Relay, our next step is to compile it to a desired # hardware to run on. We refer to this hardware as a target. This compilation process -# translates the model from relay into a lower-level language that the +# translates the model from Relay into a lower-level language that the # target machine can understand. # # In order to compile a model a tvm.target string is required. @@ -127,7 +128,11 @@ # # There may be UserWarnings that can be ignored. # This should make the end result faster, but it can take hours to tune. +# +# See the section 'Saving the Tuning Results' below. Be sure to pass the tuning +# results into compile. # +# Ex: tvmc.compile(model, target="llvm", tuning_records = "records.log") #Step 2: Compile ################################################################################ # Save and then start the process in the terminal: @@ -160,7 +165,7 @@ # Saving the model # ---------------- # -# To make things faster for later, after loading the model (Step 1) save the relay version. +# To make things faster for later, after loading the model (Step 1) save the Relay version. # The model will then appear where you saved it for later in the coverted syntax. # # .. code-block:: python @@ -243,10 +248,14 @@ # Within the TVMC Script include the following and adjust accordingly: # # .. code-block:: python -# tvmc.tune(model,trials=10000,timeout=10,) -# tvmc.tune(model,trials=10000,timeout=10,) -# tvmc.tune(model,trials=10000,timeout=10,) -# tvmc.tune(model,trials=10000,timeout=10,) +# tvmc.tune( +# model, +# target=target, # Compilation target as string // Device to compile for +# target_host=target_host, # Host processor +# hostname=host_ip_address, #The IP address of an RPC tracker, used when benchmarking remotely. +# port=port_number, # The port of the RPC tracker to connect to. Defaults to 9090. +# rpc_key=your_key, # The RPC tracker key of the target device. Required when rpc_tracker is provided +# ) # """ From 9f9d404624e3730a1ea4c1fba2bb2ac215c7a368 Mon Sep 17 00:00:00 2001 From: jshiue Date: Tue, 14 Dec 2021 13:58:55 -0500 Subject: [PATCH 19/32] reformatting text --- .../how_to/use_tvms_python_api/tvmc_python.py | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index dc97676bb65f..4fdddf1039d1 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -36,10 +36,11 @@ touch tvmcpythonintro.py Let's start editing the python file in your favorite text editor. +""" ################################################################################ # Step 0: Imports -# --------------- +# ~~~~~~~~~~~~~~~ # # .. 
code-block:: python # @@ -48,7 +49,7 @@ ################################################################################ # Step 1: Load a model -# -------------------- +# ~~~~~~~~~~~~~~~~~~~~ # # Let's import our model into tvmc. This step converts a machine learning model from # a supported framework into TVM's high level graph representation language called Relay. @@ -76,7 +77,8 @@ ################################################################################ # Step 2: Compile -# ---------------- +# ~~~~~~~~~~~~~~~ +# # Now that our model is in Relay, our next step is to compile it to a desired # hardware to run on. We refer to this hardware as a target. This compilation process # translates the model from Relay into a lower-level language that the @@ -97,7 +99,8 @@ ################################################################################ # Step 3: Run -# ----------- +# ~~~~~~~~~~~ +# # The compiled package can now be run on the hardware target. The device # input options are: CPU, Cuda, CL, Metal, and Vulkan. # @@ -110,7 +113,8 @@ ################################################################################ # Step 1.5: Tune [Optional & Recommended] -# --------------------------------------- +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# # Run speed can further be improved by tuning. This optional step uses # machine learning to look at each operation within a model (a function) and # tries to find a faster way to run it. We do this through a cost model, and @@ -136,7 +140,7 @@ ################################################################################ # Save and then start the process in the terminal: -# ------------------------------------------------ +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # .. code-block:: python # python my_tvmc_script.py @@ -146,7 +150,7 @@ ################################################################################ # Example results: -# ---------------- +# ~~~~~~~~~~~~~~~~ # # .. code-block:: python # Time elapsed for training: 18.99 s @@ -158,12 +162,14 @@ # ['output_0'] # -Additional TVMC Functionalities -=============================== +################################################################################ +# Additional TVMC Functionalities +# ------------------------------- +# ################################################################################ # Saving the model -# ---------------- +# ~~~~~~~~~~~~~~~~ # # To make things faster for later, after loading the model (Step 1) save the Relay version. # The model will then appear where you saved it for later in the coverted syntax. @@ -175,7 +181,7 @@ ################################################################################ # Saving the package -# ------------------ +# ~~~~~~~~~~~~~~~~~~ # # After the model has been compiled (Step 2) the package also is also saveable. # @@ -188,7 +194,8 @@ ################################################################################ # Using Autoscheduler -# ------------------- +# ~~~~~~~~~~~~~~~~~~~ +# # Use the next generation of tvm to enable potentially faster run speed results. # The search space of the schedules is automatically generated unlike # previously where they needed to be hand written. (Learn more: 1, 2) @@ -199,7 +206,7 @@ ################################################################################ # Saving the tuning results -# ------------------------- +# ~~~~~~~~~~~~~~~~~~~~~~~~~ # # The tuning results can be saved in a file for later reuse. 
# @@ -228,7 +235,8 @@ ################################################################################ # Tuning a more complex model: -# ---------------------------- +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# # If you notice T's (timeouts) printed like below, # .........T.T..T..T..T.T.T.T.T.T. # increase the searching time frame: @@ -239,6 +247,7 @@ ################################################################################ # Compiling a model for a remote device: +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # A remote procedural call (RPC) is useful when you would like to compile for hardware # that is not on your local machine. The tvmc methods support this. @@ -258,4 +267,3 @@ # ) # -""" From 8ba2d614b0428026d705e6d311396ab4e88787f2 Mon Sep 17 00:00:00 2001 From: jshiue Date: Tue, 14 Dec 2021 22:13:46 -0500 Subject: [PATCH 20/32] black --- .../how_to/use_tvms_python_api/tvmc_python.py | 177 +++++++++--------- 1 file changed, 88 insertions(+), 89 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 4fdddf1039d1..fcc4b0d110e5 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -41,7 +41,7 @@ ################################################################################ # Step 0: Imports # ~~~~~~~~~~~~~~~ -# +# # .. code-block:: python # # from tvm.driver import tvmc @@ -51,41 +51,41 @@ # Step 1: Load a model # ~~~~~~~~~~~~~~~~~~~~ # -# Let's import our model into tvmc. This step converts a machine learning model from -# a supported framework into TVM's high level graph representation language called Relay. -# This is to have a unified starting point for all models in tvm. The frameworks we currently +# Let's import our model into tvmc. This step converts a machine learning model from +# a supported framework into TVM's high level graph representation language called Relay. +# This is to have a unified starting point for all models in tvm. The frameworks we currently # support are: Keras, ONNX, Tensorflow, TFLite, and PyTorch. -# +# # .. code-block:: python # model = tvmc.load('my_model.onnx') #Step 1: Load -# -# If you'd like to see the Relay, you can run: +# +# If you'd like to see the Relay, you can run: # ``model.summary()`` -# -# All frameworks support overwriting the input shapes with a shape_dict argument. -# For most frameworks this is optional, but for Pytorch this is necessary as -# TVM cannot automatically search for it. +# +# All frameworks support overwriting the input shapes with a shape_dict argument. +# For most frameworks this is optional, but for Pytorch this is necessary as +# TVM cannot automatically search for it. # # .. code-block:: python -# ### Step 1: Load shape_dict Style +# ### Step 1: Load shape_dict Style # # shape_dict = {'model_input_name1': [1, 3, 224, 224], 'input2': [1, 2, 3, 4], ...} #example format with random numbers # # model = tvmc.load(model_path, shape_dict=shape_dict) #Step 1: Load + shape_dict -# -# A suggested way to see the model's input/shape_dict is via `netron `_, . After opening the model, +# +# A suggested way to see the model's input/shape_dict is via `netron `_, . After opening the model, # click the first node to see the name(s) and shape(s) in the inputs section. ################################################################################ # Step 2: Compile # ~~~~~~~~~~~~~~~ -# -# Now that our model is in Relay, our next step is to compile it to a desired -# hardware to run on. 
We refer to this hardware as a target. This compilation process -# translates the model from Relay into a lower-level language that the -# target machine can understand. -# -# In order to compile a model a tvm.target string is required. -# To learn more about tvm.targets and their options look at the `documentation `_. +# +# Now that our model is in Relay, our next step is to compile it to a desired +# hardware to run on. We refer to this hardware as a target. This compilation process +# translates the model from Relay into a lower-level language that the +# target machine can understand. +# +# In order to compile a model a tvm.target string is required. +# To learn more about tvm.targets and their options look at the `documentation `_. # Some examples include: # 1. cuda (Nvidia GPU) # 2. llvm (CPU) @@ -93,71 +93,71 @@ # # .. code-block:: python # package = tvmc.compile(model, target="llvm") #Step 2: Compile -# +# # The compilation step returns a package. -# +# ################################################################################ # Step 3: Run # ~~~~~~~~~~~ -# -# The compiled package can now be run on the hardware target. The device +# +# The compiled package can now be run on the hardware target. The device # input options are: CPU, Cuda, CL, Metal, and Vulkan. -# +# # .. code-block:: python # result = tvmc.run(package, device="cpu") #Step 3: Run -# +# # And you can print the results: # ``print(results)`` -# +# ################################################################################ # Step 1.5: Tune [Optional & Recommended] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# -# Run speed can further be improved by tuning. This optional step uses -# machine learning to look at each operation within a model (a function) and -# tries to find a faster way to run it. We do this through a cost model, and +# +# Run speed can further be improved by tuning. This optional step uses +# machine learning to look at each operation within a model (a function) and +# tries to find a faster way to run it. We do this through a cost model, and # benchmarking possible schedules. -# -# The target is the same as compile. -# +# +# The target is the same as compile. +# # .. code-block:: python # tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune -# -# The terminal output should look like: +# +# The terminal output should look like: # [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s # [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s # ..... -# -# There may be UserWarnings that can be ignored. -# This should make the end result faster, but it can take hours to tune. # -# See the section 'Saving the Tuning Results' below. Be sure to pass the tuning -# results into compile. -# +# There may be UserWarnings that can be ignored. +# This should make the end result faster, but it can take hours to tune. +# +# See the section 'Saving the Tuning Results' below. Be sure to pass the tuning +# results into compile. +# # Ex: tvmc.compile(model, target="llvm", tuning_records = "records.log") #Step 2: Compile ################################################################################ # Save and then start the process in the terminal: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# +# # .. code-block:: python # python my_tvmc_script.py -# +# # Note: Your fans may become very active -# +# ################################################################################ # Example results: # ~~~~~~~~~~~~~~~~ -# +# # .. 
code-block:: python # Time elapsed for training: 18.99 s # Execution time summary: -# mean (ms) max (ms) min (ms) std (ms) -# 25.24 26.12 24.89 0.38 -# +# mean (ms) max (ms) min (ms) std (ms) +# 25.24 26.12 24.89 0.38 +# # Output Names: # ['output_0'] # @@ -165,41 +165,41 @@ ################################################################################ # Additional TVMC Functionalities # ------------------------------- -# +# ################################################################################ # Saving the model # ~~~~~~~~~~~~~~~~ -# -# To make things faster for later, after loading the model (Step 1) save the Relay version. +# +# To make things faster for later, after loading the model (Step 1) save the Relay version. # The model will then appear where you saved it for later in the coverted syntax. # # .. code-block:: python # model = tvmc.load('my_model.onnx') #Step 1: Load -# model.save(desired_model_path) -# +# model.save(desired_model_path) +# ################################################################################ # Saving the package # ~~~~~~~~~~~~~~~~~~ -# -# After the model has been compiled (Step 2) the package also is also saveable. -# +# +# After the model has been compiled (Step 2) the package also is also saveable. +# # .. code-block:: python # tvmc.compile(model, target="llvm", package_path="whatever") -# -# new_package = tvmc.TVMCPackage(package_path="whatever") +# +# new_package = tvmc.TVMCPackage(package_path="whatever") # result = tvmc.run(new_package) #Step 3: Run -# +# ################################################################################ # Using Autoscheduler # ~~~~~~~~~~~~~~~~~~~ -# -# Use the next generation of tvm to enable potentially faster run speed results. -# The search space of the schedules is automatically generated unlike -# previously where they needed to be hand written. (Learn more: 1, 2) -# +# +# Use the next generation of tvm to enable potentially faster run speed results. +# The search space of the schedules is automatically generated unlike +# previously where they needed to be hand written. (Learn more: 1, 2) +# # .. code-block:: python # tvmc.tune(model, target="llvm", enable_autoscheduler = True) # @@ -207,40 +207,40 @@ ################################################################################ # Saving the tuning results # ~~~~~~~~~~~~~~~~~~~~~~~~~ -# +# # The tuning results can be saved in a file for later reuse. -# +# # Method 1: # .. code-block:: python # log_file = "hello.json" -# +# # # Run tuning # tvmc.tune(model, target="llvm",tuning_records=log_file) -# +# # ... -# +# # # Later run tuning and reuse tuning results # tvmc.tune(model, target="llvm",tuning_records=log_file) -# +# # Method 2: # .. code-block:: python # # Run tuning # tuning_records = tvmc.tune(model, target="llvm") -# +# # ... -# +# # # Later run tuning and reuse tuning results # tvmc.tune(model, target="llvm",tuning_records=tuning_records) -# +# ################################################################################ # Tuning a more complex model: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# +# # If you notice T's (timeouts) printed like below, -# .........T.T..T..T..T.T.T.T.T.T. -# increase the searching time frame: -# +# .........T.T..T..T..T.T.T.T.T.T. +# increase the searching time frame: +# # .. 
code-block:: python
#     tvmc.tune(model,trials=10000,timeout=10,)
#
@@ -248,14 +248,14 @@
################################################################################
# Compiling a model for a remote device:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# A remote procedural call (RPC) is useful when you would like to compile for hardware
-# that is not on your local machine. The tvmc methods support this.
-# To set up the RPC server take a look at the 'Set up RPC Server on Device'
-# section in this `document `_.
-#
+#
+# A remote procedure call (RPC) is useful when you would like to compile for hardware
+# that is not on your local machine. The tvmc methods support this.
+# To set up the RPC server take a look at the 'Set up RPC Server on Device'
+# section in this `document `_.
+#
# Within the TVMC Script include the following and adjust accordingly:
-#
+#
# .. code-block:: python
#     tvmc.tune(
#         model,
#         target=target, # Compilation target as string // Device to compile for
@@ -265,5 +265,4 @@
#         port=port_number, # The port of the RPC tracker to connect to. Defaults to 9090.
#         rpc_key=your_key, # The RPC tracker key of the target device. Required when rpc_tracker is provided
#     )
-#
-
+#

From 4985e15573bc1c8312d01469d685e60fc27c3cec Mon Sep 17 00:00:00 2001
From: jshiue
Date: Wed, 15 Dec 2021 17:32:44 -0500
Subject: [PATCH 21/32] hitting code block issue, trying to debug

---
 gallery/how_to/use_tvms_python_api/tvmc_python.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py
index fcc4b0d110e5..29b2ba1c4f25 100644
--- a/gallery/how_to/use_tvms_python_api/tvmc_python.py
+++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py
@@ -27,8 +27,6 @@
 Before we get started let's get an example model if you don't already have one.
 Follow the steps to download a resnet model via the terminal:

- .. code-block:: bash
-
   mkdir myscripts
   cd myscripts
   wget https://github.com/onnx/models/raw/master/vision/classification/resnet/model/resnet50-v2-7.onnx

From aee6db3e2df7250e78072e4476a7bdaaf6f934c7 Mon Sep 17 00:00:00 2001
From: jshiue
Date: Wed, 15 Dec 2021 20:20:32 -0500
Subject: [PATCH 22/32] added spaces after the python codeblock

---
 .../how_to/use_tvms_python_api/tvmc_python.py | 81 ++++++++++++-------
 1 file changed, 51 insertions(+), 30 deletions(-)

diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py
index 29b2ba1c4f25..0520ab1fe261 100644
--- a/gallery/how_to/use_tvms_python_api/tvmc_python.py
+++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py
@@ -40,10 +40,11 @@
# Step 0: Imports
# ~~~~~~~~~~~~~~~
#
-# .. code-block:: python
+# .. code-block:: python
#
#     from tvm.driver import tvmc
#
+#

################################################################################
# Step 1: Load a model
@@ -55,7 +56,8 @@
# support are: Keras, ONNX, Tensorflow, TFLite, and PyTorch.
#
# .. code-block:: python
-#     model = tvmc.load('my_model.onnx') #Step 1: Load
+#
+#     model = tvmc.load('my_model.onnx') #Step 1: Load
#
# If you'd like to see the Relay, you can run:
# ``model.summary()``
@@ -65,10 +67,11 @@
# TVM cannot automatically search for it.
#
# ..
code-block:: python -# ### Step 1: Load shape_dict Style -# # shape_dict = {'model_input_name1': [1, 3, 224, 224], 'input2': [1, 2, 3, 4], ...} #example format with random numbers -# # model = tvmc.load(model_path, shape_dict=shape_dict) #Step 1: Load + shape_dict # +# ### Step 1: Load shape_dict Style +# # shape_dict = {'model_input_name1': [1, 3, 224, 224], 'input2': [1, 2, 3, 4], ...} #example format with random numbers +# # model = tvmc.load(model_path, shape_dict=shape_dict) #Step 1: Load + shape_dict +# # A suggested way to see the model's input/shape_dict is via `netron `_, . After opening the model, # click the first node to see the name(s) and shape(s) in the inputs section. @@ -89,8 +92,10 @@ # 2. llvm (CPU) # 3. llvm -mcpu=cascadelake (Intel CPU) # -# .. code-block:: python -# package = tvmc.compile(model, target="llvm") #Step 2: Compile +# .. code-block:: python +# +# package = tvmc.compile(model, target="llvm") #Step 2: Compile +# # # The compilation step returns a package. # @@ -102,8 +107,9 @@ # The compiled package can now be run on the hardware target. The device # input options are: CPU, Cuda, CL, Metal, and Vulkan. # -# .. code-block:: python -# result = tvmc.run(package, device="cpu") #Step 3: Run +# .. code-block:: python +# +# result = tvmc.run(package, device="cpu") #Step 3: Run # # And you can print the results: # ``print(results)`` @@ -120,8 +126,9 @@ # # The target is the same as compile. # -# .. code-block:: python -# tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune +# .. code-block:: python +# +# tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune # # The terminal output should look like: # [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s @@ -140,8 +147,9 @@ # Save and then start the process in the terminal: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # -# .. code-block:: python -# python my_tvmc_script.py +# .. code-block:: python +# +# python my_tvmc_script.py # # Note: Your fans may become very active # @@ -150,15 +158,18 @@ # Example results: # ~~~~~~~~~~~~~~~~ # -# .. code-block:: python -# Time elapsed for training: 18.99 s -# Execution time summary: -# mean (ms) max (ms) min (ms) std (ms) -# 25.24 26.12 24.89 0.38 -# -# Output Names: -# ['output_0'] +# .. code-block:: python # +# Time elapsed for training: 18.99 s +# Execution time summary: +# mean (ms) max (ms) min (ms) std (ms) +# 25.24 26.12 24.89 0.38 +# +# +# Output Names: +# ['output_0'] +# + ################################################################################ # Additional TVMC Functionalities @@ -172,9 +183,11 @@ # To make things faster for later, after loading the model (Step 1) save the Relay version. # The model will then appear where you saved it for later in the coverted syntax. # -# .. code-block:: python -# model = tvmc.load('my_model.onnx') #Step 1: Load -# model.save(desired_model_path) +# .. code-block:: python +# +# model = tvmc.load('my_model.onnx') #Step 1: Load +# model.save(desired_model_path) +# # ################################################################################ @@ -183,11 +196,13 @@ # # After the model has been compiled (Step 2) the package also is also saveable. # -# .. code-block:: python -# tvmc.compile(model, target="llvm", package_path="whatever") +# .. 
code-block:: python +# +# tvmc.compile(model, target="llvm", package_path="whatever") +# +# new_package = tvmc.TVMCPackage(package_path="whatever") +# result = tvmc.run(new_package) #Step 3: Run # -# new_package = tvmc.TVMCPackage(package_path="whatever") -# result = tvmc.run(new_package) #Step 3: Run # ################################################################################ @@ -198,8 +213,10 @@ # The search space of the schedules is automatically generated unlike # previously where they needed to be hand written. (Learn more: 1, 2) # -# .. code-block:: python -# tvmc.tune(model, target="llvm", enable_autoscheduler = True) +# .. code-block:: python +# +# tvmc.tune(model, target="llvm", enable_autoscheduler = True) +# # ################################################################################ @@ -210,6 +227,7 @@ # # Method 1: # .. code-block:: python +# # log_file = "hello.json" # # # Run tuning @@ -222,6 +240,7 @@ # # Method 2: # .. code-block:: python +# # # Run tuning # tuning_records = tvmc.tune(model, target="llvm") # @@ -240,6 +259,7 @@ # increase the searching time frame: # # .. code-block:: python +# # tvmc.tune(model,trials=10000,timeout=10,) # @@ -255,6 +275,7 @@ # Within the TVMC Script include the following and adjust accordingly: # # .. code-block:: python +# # tvmc.tune( # model, # target=target, # Compilation target as string // Device to compile for From 74b3826e300c9565f717e54e1e8dc3c7aaade80a Mon Sep 17 00:00:00 2001 From: jshiue Date: Wed, 15 Dec 2021 20:30:28 -0500 Subject: [PATCH 23/32] black --- .../how_to/use_tvms_python_api/tvmc_python.py | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 0520ab1fe261..d1a0dd36c81f 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -44,7 +44,7 @@ # # from tvm.driver import tvmc # -# +# ################################################################################ # Step 1: Load a model @@ -71,7 +71,7 @@ # ### Step 1: Load shape_dict Style # # shape_dict = {'model_input_name1': [1, 3, 224, 224], 'input2': [1, 2, 3, 4], ...} #example format with random numbers # # model = tvmc.load(model_path, shape_dict=shape_dict) #Step 1: Load + shape_dict -# +# # A suggested way to see the model's input/shape_dict is via `netron `_, . After opening the model, # click the first node to see the name(s) and shape(s) in the inputs section. @@ -95,7 +95,7 @@ # .. code-block:: python # # package = tvmc.compile(model, target="llvm") #Step 2: Compile -# +# # # The compilation step returns a package. # @@ -164,11 +164,11 @@ # Execution time summary: # mean (ms) max (ms) min (ms) std (ms) # 25.24 26.12 24.89 0.38 -# -# +# +# # Output Names: # ['output_0'] -# +# ################################################################################ @@ -187,7 +187,7 @@ # # model = tvmc.load('my_model.onnx') #Step 1: Load # model.save(desired_model_path) -# +# # ################################################################################ @@ -199,7 +199,7 @@ # .. code-block:: python # # tvmc.compile(model, target="llvm", package_path="whatever") -# +# # new_package = tvmc.TVMCPackage(package_path="whatever") # result = tvmc.run(new_package) #Step 3: Run # @@ -216,7 +216,7 @@ # .. 
code-block:: python # # tvmc.tune(model, target="llvm", enable_autoscheduler = True) -# +# # ################################################################################ @@ -227,7 +227,7 @@ # # Method 1: # .. code-block:: python -# +# # log_file = "hello.json" # # # Run tuning @@ -240,7 +240,7 @@ # # Method 2: # .. code-block:: python -# +# # # Run tuning # tuning_records = tvmc.tune(model, target="llvm") # @@ -259,7 +259,7 @@ # increase the searching time frame: # # .. code-block:: python -# +# # tvmc.tune(model,trials=10000,timeout=10,) # @@ -275,7 +275,7 @@ # Within the TVMC Script include the following and adjust accordingly: # # .. code-block:: python -# +# # tvmc.tune( # model, # target=target, # Compilation target as string // Device to compile for From 45ac07e76bfc4a08322d89a444e209453139c314 Mon Sep 17 00:00:00 2001 From: jshiue Date: Fri, 17 Dec 2021 20:36:26 -0500 Subject: [PATCH 24/32] changing formatting --- .../how_to/use_tvms_python_api/tvmc_python.py | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index d1a0dd36c81f..29a0fbfa48b5 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -18,7 +18,7 @@ Getting Starting using TVMC Python: a high-level API for TVM ============================================================= **Author**: -`Jocelyn Shiue `_, +`Jocelyn Shiue `_ Welcome to TVMC Python ====================== @@ -27,6 +27,8 @@ Before we get started let's get an example model if you don't already have one. Follow the steps to download a resnet model via the terminal: + .. code-block:: python + mkdir myscripts cd myscripts wget https://github.com/onnx/models/raw/master/vision/classification/resnet/model/resnet50-v2-7.onnx @@ -68,11 +70,9 @@ # # .. code-block:: python # -# ### Step 1: Load shape_dict Style -# # shape_dict = {'model_input_name1': [1, 3, 224, 224], 'input2': [1, 2, 3, 4], ...} #example format with random numbers -# # model = tvmc.load(model_path, shape_dict=shape_dict) #Step 1: Load + shape_dict +# #model = tvmc.load(model_path, shape_dict={'input1' : [1, 2, 3, 4], 'input2' : [1, 2, 3, 4]}) #Step 1: Load + shape_dict # -# A suggested way to see the model's input/shape_dict is via `netron `_, . After opening the model, +# A suggested way to see the model's input/shape_dict is via `netron `_. After opening the model, # click the first node to see the name(s) and shape(s) in the inputs section. @@ -131,9 +131,11 @@ # tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune # # The terminal output should look like: -# [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s -# [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s -# ..... +# +# .. code-block:: python +# [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s +# [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s +# ..... # # There may be UserWarnings that can be ignored. # This should make the end result faster, but it can take hours to tune. 
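# If you only want a quick taste of tuning rather than an hours-long search, you can
# cap the search, as in the sketch below (the numbers are illustrative, not
# recommendations):
#
# .. code-block:: python
#
#     tvmc.tune(model, target="llvm", trials=500, timeout=10)  # fewer trials, short per-op timeout
#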
@@ -173,7 +175,7 @@ ################################################################################ # Additional TVMC Functionalities -# ------------------------------- +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ################################################################################ @@ -255,7 +257,7 @@ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # If you notice T's (timeouts) printed like below, -# .........T.T..T..T..T.T.T.T.T.T. +# ``.........T.T..T..T..T.T.T.T.T.T.`` # increase the searching time frame: # # .. code-block:: python From 0285d842e3519e26ec4c7f89113e82f9aafe0749 Mon Sep 17 00:00:00 2001 From: jshiue Date: Fri, 17 Dec 2021 20:58:04 -0500 Subject: [PATCH 25/32] touching up more edits' --- .../how_to/use_tvms_python_api/tvmc_python.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 29a0fbfa48b5..7df3c424d359 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -88,9 +88,9 @@ # In order to compile a model a tvm.target string is required. # To learn more about tvm.targets and their options look at the `documentation `_. # Some examples include: -# 1. cuda (Nvidia GPU) -# 2. llvm (CPU) -# 3. llvm -mcpu=cascadelake (Intel CPU) +# 1. cuda (Nvidia GPU) +# 2. llvm (CPU) +# 3. llvm -mcpu=cascadelake (Intel CPU) # # .. code-block:: python # @@ -133,6 +133,7 @@ # The terminal output should look like: # # .. code-block:: python +# # [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s # [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s # ..... @@ -143,7 +144,9 @@ # See the section 'Saving the Tuning Results' below. Be sure to pass the tuning # results into compile. # -# Ex: tvmc.compile(model, target="llvm", tuning_records = "records.log") #Step 2: Compile +# .. code-block:: python +# +# #tvmc.compile(model, target="llvm", tuning_records = "records.log") #Step 2: Compile ################################################################################ # Save and then start the process in the terminal: @@ -213,7 +216,9 @@ # # Use the next generation of tvm to enable potentially faster run speed results. # The search space of the schedules is automatically generated unlike -# previously where they needed to be hand written. (Learn more: 1, 2) +# previously where they needed to be hand written. (Learn more: +# `1 `_, +# `2 `_ ) # # .. code-block:: python # @@ -256,8 +261,7 @@ # Tuning a more complex model: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # -# If you notice T's (timeouts) printed like below, -# ``.........T.T..T..T..T.T.T.T.T.T.`` +# If you notice T's print ``.........T.T..T..T..T.T.T.T.T.T.`` # increase the searching time frame: # # .. code-block:: python From 7fcc2c8e7a5389878bea4d00e91899d3eff4cc77 Mon Sep 17 00:00:00 2001 From: jshiue Date: Fri, 17 Dec 2021 21:09:19 -0500 Subject: [PATCH 26/32] more touchups --- gallery/how_to/use_tvms_python_api/tvmc_python.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/how_to/use_tvms_python_api/tvmc_python.py index 7df3c424d359..f1793d7a8b2b 100644 --- a/gallery/how_to/use_tvms_python_api/tvmc_python.py +++ b/gallery/how_to/use_tvms_python_api/tvmc_python.py @@ -70,7 +70,7 @@ # # .. 
code-block:: python # -# #model = tvmc.load(model_path, shape_dict={'input1' : [1, 2, 3, 4], 'input2' : [1, 2, 3, 4]}) #Step 1: Load + shape_dict +# #model = tvmc.load(my_model, shape_dict={'input1' : [1, 2, 3, 4], 'input2' : [1, 2, 3, 4]}) #Step 1: Load + shape_dict # # A suggested way to see the model's input/shape_dict is via `netron `_. After opening the model, # click the first node to see the name(s) and shape(s) in the inputs section. @@ -142,7 +142,7 @@ # This should make the end result faster, but it can take hours to tune. # # See the section 'Saving the Tuning Results' below. Be sure to pass the tuning -# results into compile. +# results into compile if you want the results to apply. # # .. code-block:: python # @@ -218,7 +218,7 @@ # The search space of the schedules is automatically generated unlike # previously where they needed to be hand written. (Learn more: # `1 `_, -# `2 `_ ) +# `2 `_) # # .. code-block:: python # @@ -261,7 +261,7 @@ # Tuning a more complex model: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # -# If you notice T's print ``.........T.T..T..T..T.T.T.T.T.T.`` +# If you notice T's printing that look like ``.........T.T..T..T..T.T.T.T.T.T.`` # increase the searching time frame: # # .. code-block:: python From 1786f4ad90bd1e75ba5767e7a55cb5e993afa189 Mon Sep 17 00:00:00 2001 From: jshiue Date: Fri, 17 Dec 2021 22:46:04 -0500 Subject: [PATCH 27/32] changed location of file to tutorial section --- docs/conf.py | 3 +-- gallery/how_to/use_tvms_python_api/README.txt | 2 -- .../{how_to/use_tvms_python_api => tutorial}/tvmc_python.py | 0 3 files changed, 1 insertion(+), 4 deletions(-) delete mode 100644 gallery/how_to/use_tvms_python_api/README.txt rename gallery/{how_to/use_tvms_python_api => tutorial}/tvmc_python.py (100%) diff --git a/docs/conf.py b/docs/conf.py index a8fd18624447..cce7f6c63d1b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -220,14 +220,12 @@ def git_describe_version(original_version): tvm_path.joinpath("gallery", "how_to", "tune_with_autoscheduler"), tvm_path.joinpath("gallery", "how_to", "work_with_microtvm"), tvm_path.joinpath("gallery", "how_to", "extend_tvm"), - tvm_path.joinpath("gallery", "how_to", "use_tvms_python_api"), tvm_path.joinpath("vta", "tutorials"), ] gallery_dirs = [ "tutorial", "how_to/compile_models", - "how_to/use_tvms_python_api", "how_to/deploy_models", "how_to/work_with_relay", "how_to/work_with_schedules", @@ -257,6 +255,7 @@ def git_describe_version(original_version): "introduction.py", "install.py", "tvmc_command_line_driver.py", + "tvmc_python.py" "autotvm_relay_x86.py", "tensor_expr_get_started.py", "autotvm_matmul_x86.py", diff --git a/gallery/how_to/use_tvms_python_api/README.txt b/gallery/how_to/use_tvms_python_api/README.txt deleted file mode 100644 index 929a011683da..000000000000 --- a/gallery/how_to/use_tvms_python_api/README.txt +++ /dev/null @@ -1,2 +0,0 @@ -Use TVM's Python Scripting API ------------------------------- \ No newline at end of file diff --git a/gallery/how_to/use_tvms_python_api/tvmc_python.py b/gallery/tutorial/tvmc_python.py similarity index 100% rename from gallery/how_to/use_tvms_python_api/tvmc_python.py rename to gallery/tutorial/tvmc_python.py From 33c6ea0370ccd010243d1f26cd0a78d9aa35a32d Mon Sep 17 00:00:00 2001 From: jshiue Date: Fri, 17 Dec 2021 23:27:51 -0500 Subject: [PATCH 28/32] changing doc location --- docs/legacy_redirect.py | 4 ++++ gallery/tutorial/tvmc_python.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/legacy_redirect.py b/docs/legacy_redirect.py index 
0f1dee6dbf24..56e8d26d0ba3 100644 --- a/docs/legacy_redirect.py +++ b/docs/legacy_redirect.py @@ -242,6 +242,10 @@ "tutorials/get_started/tvmc_command_line_driver.html", "../../tutorial/tvmc_command_line_driver.html", ], + [ + "tutorials/get_started/tvmc_python.html", + "../../tutorial/tvmc_python.html", + ], ] redirect_template = """ diff --git a/gallery/tutorial/tvmc_python.py b/gallery/tutorial/tvmc_python.py index f1793d7a8b2b..b0d3cc305ba3 100644 --- a/gallery/tutorial/tvmc_python.py +++ b/gallery/tutorial/tvmc_python.py @@ -21,7 +21,7 @@ `Jocelyn Shiue `_ Welcome to TVMC Python -====================== +---------------------- Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 Before we get started let's get an example model if you don't already have one. From 68ab249acc71fb607250503f4d85bb8b27fc9574 Mon Sep 17 00:00:00 2001 From: jshiue Date: Sat, 18 Dec 2021 00:09:58 -0500 Subject: [PATCH 29/32] broke the order of the docs somehow --- gallery/tutorial/tvmc_python.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/gallery/tutorial/tvmc_python.py b/gallery/tutorial/tvmc_python.py index b0d3cc305ba3..2c9b4dedc29a 100644 --- a/gallery/tutorial/tvmc_python.py +++ b/gallery/tutorial/tvmc_python.py @@ -20,8 +20,6 @@ **Author**: `Jocelyn Shiue `_ -Welcome to TVMC Python ----------------------- Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 Before we get started let's get an example model if you don't already have one. From cbf6cdc8a4cd295a1c75a597f0a29d47bb340e56 Mon Sep 17 00:00:00 2001 From: jshiue Date: Sat, 18 Dec 2021 00:38:19 -0500 Subject: [PATCH 30/32] fixed it yayy --- docs/conf.py | 2 +- docs/how_to/index.rst | 1 - gallery/tutorial/tvmc_python.py | 10 +++++----- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index cce7f6c63d1b..2f650a88c936 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -255,7 +255,7 @@ def git_describe_version(original_version): "introduction.py", "install.py", "tvmc_command_line_driver.py", - "tvmc_python.py" + "tvmc_python.py", "autotvm_relay_x86.py", "tensor_expr_get_started.py", "autotvm_matmul_x86.py", diff --git a/docs/how_to/index.rst b/docs/how_to/index.rst index 43475bb1f0da..433d7acee95a 100644 --- a/docs/how_to/index.rst +++ b/docs/how_to/index.rst @@ -26,7 +26,6 @@ schedule with tesor expressions?" :maxdepth: 1 compile_models/index - use_tvms_python_api/index deploy/index work_with_relay/index work_with_schedules/index diff --git a/gallery/tutorial/tvmc_python.py b/gallery/tutorial/tvmc_python.py index 2c9b4dedc29a..db70355946c5 100644 --- a/gallery/tutorial/tvmc_python.py +++ b/gallery/tutorial/tvmc_python.py @@ -20,7 +20,7 @@ **Author**: `Jocelyn Shiue `_ -Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 +Hi! Here we explain the scripting tool designed for the complete TVM beginner. 🙂 Before we get started let's get an example model if you don't already have one. Follow the steps to download a resnet model via the terminal: @@ -129,9 +129,9 @@ # tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune # # The terminal output should look like: -# +# # .. code-block:: python -# +# # [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s # [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s # ..... @@ -143,7 +143,7 @@ # results into compile if you want the results to apply. # # .. 
code-block:: python -# +# # #tvmc.compile(model, target="llvm", tuning_records = "records.log") #Step 2: Compile ################################################################################ @@ -215,7 +215,7 @@ # Use the next generation of tvm to enable potentially faster run speed results. # The search space of the schedules is automatically generated unlike # previously where they needed to be hand written. (Learn more: -# `1 `_, +# `1 `_, # `2 `_) # # .. code-block:: python From 846a6e157fed82485c48a09661f239a251c7d96d Mon Sep 17 00:00:00 2001 From: jshiue Date: Tue, 18 Jan 2022 20:07:00 -0500 Subject: [PATCH 31/32] added additional indentation --- gallery/tutorial/tvmc_python.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gallery/tutorial/tvmc_python.py b/gallery/tutorial/tvmc_python.py index db70355946c5..a62fc2a6c582 100644 --- a/gallery/tutorial/tvmc_python.py +++ b/gallery/tutorial/tvmc_python.py @@ -86,6 +86,7 @@ # In order to compile a model a tvm.target string is required. # To learn more about tvm.targets and their options look at the `documentation `_. # Some examples include: +# # 1. cuda (Nvidia GPU) # 2. llvm (CPU) # 3. llvm -mcpu=cascadelake (Intel CPU) From ac778a4258f9c957d48531b18ccc294695cfc857 Mon Sep 17 00:00:00 2001 From: jshiue Date: Tue, 18 Jan 2022 20:13:40 -0500 Subject: [PATCH 32/32] black'd --- gallery/tutorial/tvmc_python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/tutorial/tvmc_python.py b/gallery/tutorial/tvmc_python.py index a62fc2a6c582..1f685589730f 100644 --- a/gallery/tutorial/tvmc_python.py +++ b/gallery/tutorial/tvmc_python.py @@ -86,7 +86,7 @@ # In order to compile a model a tvm.target string is required. # To learn more about tvm.targets and their options look at the `documentation `_. # Some examples include: -# +# # 1. cuda (Nvidia GPU) # 2. llvm (CPU) # 3. llvm -mcpu=cascadelake (Intel CPU)
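################################################################################
# Putting it all together
# ~~~~~~~~~~~~~~~~~~~~~~~
#
# A minimal end-to-end script might look like the sketch below. It assumes the
# ``resnet50-v2-7.onnx`` file downloaded at the start of the tutorial sits next to
# the script; swap in your own model, target, and record file name as needed.
#
# .. code-block:: python
#
#     from tvm.driver import tvmc
#
#     model = tvmc.load("resnet50-v2-7.onnx")                              # Step 1: Load
#     tvmc.tune(model, target="llvm", tuning_records="records.json")       # Step 1.5: Tune (optional)
#     package = tvmc.compile(model, target="llvm", tuning_records="records.json")  # Step 2: Compile
#     result = tvmc.run(package, device="cpu")                             # Step 3: Run
#     print(result)
#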