Skip to content

Commit

Permalink
[TARGET] Move target_host usage to new target style. (#9497)
Browse files Browse the repository at this point in the history
- Add deprecation warnings to functions with target_host parameters.
- Update the build usage to new target style.
  • Loading branch information
tqchen authored Nov 12, 2021
1 parent 6159b8e commit 137def8
Show file tree
Hide file tree
Showing 43 changed files with 162 additions and 77 deletions.
2 changes: 1 addition & 1 deletion apps/android_camera/models/prepare_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ def main(model_str, output_path):
pass
print("building...")
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(net, target, target_host=target_host, params=params)
graph, lib, params = relay.build(net, tvm.target.Target(target, target_host), params=params)
print("dumping lib...")
lib.export_library(output_path_str + "/" + "deploy_lib_cpu.so", ndk.create_shared)
print("dumping graph...")
Expand Down
4 changes: 2 additions & 2 deletions apps/android_rpc/tests/android_rpc_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ def test_rpc_module():
s[B].bind(xo, te.thread_axis("blockIdx.x"))
# Build the dynamic lib.
# If we don't want to do metal and only use cpu, just set target to be target
f = tvm.build(s, [A, B], "opencl", target_host=target, name="myadd")
f = tvm.build(s, [A, B], tvm.target.Target("opencl", host=target), name="myadd")
path_dso_cl = temp.relpath("dev_lib_cl.so")
f.export_library(path_dso_cl, ndk.create_shared)

Expand All @@ -109,7 +109,7 @@ def test_rpc_module():
s[B].bind(xo, te.thread_axis("blockIdx.x"))
# Build the dynamic lib.
# If we don't want to do metal and only use cpu, just set target to be target
f = tvm.build(s, [A, B], "vulkan", target_host=target, name="myadd")
f = tvm.build(s, [A, B], tvm.target.Target("vulkan", host=target), name="myadd")
path_dso_vulkan = temp.relpath("dev_lib_vulkan.so")
f.export_library(path_dso_vulkan, ndk.create_shared)

Expand Down
2 changes: 1 addition & 1 deletion apps/benchmark/arm_cpu_imagenet_bench.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def evaluate_network(network, target, target_host, repeat):

print_progress("%-20s building..." % network)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(net, target=target, target_host=target_host, params=params)
lib = relay.build(net, target=tvm.target.Target(target, host=target_host), params=params)

tmp = tempdir()
if "android" in str(target):
Expand Down
2 changes: 1 addition & 1 deletion apps/benchmark/mobile_gpu_imagenet_bench.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def evaluate_network(network, target, target_host, dtype, repeat):

print_progress("%-20s building..." % network)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(net, target=target, target_host=target_host, params=params)
lib = relay.build(net, target=tvm.target.Target(target, host=target_host), params=params)

tmp = tempdir()
if "android" in str(target) or "android" in str(target_host):
Expand Down
18 changes: 9 additions & 9 deletions apps/hexagon_launcher/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
## Compilation

The launcher consists of two parts: part running on Hexagon, and part running
on Android. Each component must be compiled separately.
on Android. Each component must be compiled separately.

The supported Snapdragon architectures are 855, 865, and 888.

Expand All @@ -37,9 +37,9 @@ Building the Hexagon launcher application as a component of the main TVM build
used for Hexagon codegen can be achieved by setting `USE_HEXAGON_LAUNCHER=ON`.
This option will compile core tvm, the android launcher binary and its corresponding
tvm_runtime, as well as the Hexagon launcher shared library and its corresponding
tvm_runtime. As described in the [Manual compilation](#Manual compilation) section
each component requires Hexagon and android dependencies. When building the launcher
along with TVM these configurations must be providing when invoking cmake. A minimal
tvm_runtime. As described in the [Manual compilation](#manual-compilation) section
each component requires Hexagon and android dependencies. When building the launcher
along with TVM these configurations must be provided when invoking cmake. A minimal
example invocation for compiling TVM along with the Hexagon launcher is included below:

```
Expand All @@ -59,9 +59,9 @@ cmake -DCMAKE_C_COMPILER=/path/to/clang \
```

where `v65|v66|v68` means "one of" these architecture versions.
The Hexagon launcher application is an android binary and thus requires the use
of an android toolchain for compilation. Similarly, the Hexagon tvm runtime
requires the use of the Hexagon toolchain and depends on the Hexagon SDK. The
The Hexagon launcher application is an android binary and thus requires the use
of an android toolchain for compilation. Similarly, the Hexagon tvm runtime
requires the use of the Hexagon toolchain and depends on the Hexagon SDK. The
resulting hexagon launcher binaries can be found in the `apps_hexagon_launcher`
subdirectory of the cmake build directory. Please note that the above command
will not build support for Hexagon codegen in the TVM library, for that please
Expand All @@ -70,7 +70,7 @@ additionally define the `USE_HEXAGON_DEVICE` variable. Also, the LLVM used in

### Manual compilation

Since some source files are shared between the Hexagon and android builds,
Since some source files are shared between the Hexagon and android builds,
make sure to delete all object files between compilations. Compile the Hexagon
code first.

Expand Down Expand Up @@ -157,7 +157,7 @@ mod, params = relay.frontend.from_tflite(
target = tvm.target.hexagon('v68', link_params=True)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, target_host=target, params=params, mod_name="default")
lib = relay.build(mod, tvm.target.Target(target, host=target), params=params, mod_name="default")
# Save model.so and model.json:
with open('model.json', 'w') as f:
Expand Down
4 changes: 3 additions & 1 deletion apps/ios_rpc/tests/ios_rpc_mobilenet.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,9 @@ def test_mobilenet(host, port, key, mode):

def run(mod, target):
with relay.build_config(opt_level=3):
lib = relay.build(mod, target=target, target_host=target_host, params=params)
lib = relay.build(
mod, target=tvm.target.Target(target, host=target_host), params=params
)
path_dso = temp.relpath("deploy.dylib")
lib.export_library(path_dso, xcode.create_dylib, arch=arch, sdk=sdk)

Expand Down
2 changes: 1 addition & 1 deletion apps/ios_rpc/tests/ios_rpc_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def test_rpc_module(host, port, key, mode):
s[B].bind(xo, te.thread_axis("blockIdx.x"))
# Build the dynamic lib.
# If we don't want to do metal and only use cpu, just set target to be target
f = tvm.build(s, [A, B], "metal", target_host=target, name="myadd")
f = tvm.build(s, [A, B], tvm.target.Target("metal", host=target), name="myadd")
path_dso1 = temp.relpath("dev_lib.dylib")
f.export_library(path_dso1, xcode.create_dylib, arch=arch, sdk=sdk)

Expand Down
2 changes: 1 addition & 1 deletion apps/topi_recipe/gemm/android_gemm_square.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ def test_gemm_gpu(N, times, bn, num_block, num_thread):

print(tvm.lower(s, [A, B, C], simple_mode=True))

f = tvm.build(s, [A, B, C], "opencl", target_host=target, name="gemm_gpu")
f = tvm.build(s, [A, B, C], tvm.target.Target("opencl", host=target), name="gemm_gpu")
temp = utils.tempdir()
path_dso = temp.relpath("gemm_gpu.so")
f.export_library(path_dso, ndk.create_shared)
Expand Down
2 changes: 1 addition & 1 deletion docs/how_to/deploy/bnns.rst
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ After that you need to compile new module with target corresponding to required
model = partition_for_bnns(model, params=params) # to markup operations to be offloaded to BNNS
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(model, target=target, target_host=target, params=params)
lib = relay.build(model, target=target, params=params)
Export the module.

Expand Down
5 changes: 2 additions & 3 deletions docs/how_to/deploy/hls.rst
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,7 @@ We use two python scripts for this tutorial.
import tvm
from tvm import te
tgt_host="llvm"
tgt="sdaccel"
tgt= tvm.target.Target("sdaccel", host="llvm")
n = te.var("n")
A = te.placeholder((n,), name='A')
Expand All @@ -47,7 +46,7 @@ We use two python scripts for this tutorial.
s[C].bind(px, tvm.te.thread_axis("pipeline"))
fadd = tvm.build(s, [A, B, C], tgt, target_host=tgt_host, name="myadd")
fadd = tvm.build(s, [A, B, C], tgt, name="myadd")
fadd.save("myadd.o")
fadd.imported_modules[0].save("myadd.xclbin")
Expand Down
3 changes: 1 addition & 2 deletions golang/sample/deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@

# Global declarations of environment.

tgt_host = "llvm"
tgt = "llvm"

######################################################################
Expand All @@ -45,7 +44,7 @@
######################################################################
# Compilation
# -----------
fadd = tvm.build(s, [A, B, C], tgt, target_host=tgt_host, name="myadd")
fadd = tvm.build(s, [A, B, C], tgt, name="myadd")

######################################################################
# Save Compiled Module
Expand Down
2 changes: 1 addition & 1 deletion jvm/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ def test_add(target_dir):
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], "llvm", target_host="llvm", name="myadd")
fadd = tvm.build(s, [A, B, C], "llvm", name="myadd")

fadd.save(os.path.join(target_dir, "add_cpu.o"))
cc.create_shared(os.path.join(target_dir, "add_cpu.so"),
Expand Down
2 changes: 1 addition & 1 deletion jvm/core/src/test/scripts/test_add_cpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ def test_add(target_dir):
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], "llvm", target_host="llvm", name="myadd")
fadd = tvm.build(s, [A, B, C], "llvm", name="myadd")

fadd.save(os.path.join(target_dir, "add_cpu.o"))
cc.create_shared(
Expand Down
2 changes: 1 addition & 1 deletion jvm/core/src/test/scripts/test_add_gpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def test_add(target_dir):
bx, tx = s[C].split(C.op.axis[0], factor=64)
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
fadd_cuda = tvm.build(s, [A, B, C], "cuda", target_host="llvm", name="myadd")
fadd_cuda = tvm.build(s, [A, B, C], tvm.target.Target("cuda", host="llvm"), name="myadd")

fadd_cuda.save(os.path.join(target_dir, "add_cuda.o"))
fadd_cuda.imported_modules[0].save(os.path.join(target_dir, "add_cuda.ptx"))
Expand Down
6 changes: 6 additions & 0 deletions python/tvm/auto_scheduler/relay_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
import json
import logging
import threading
import warnings

import tvm
from tvm import autotvm, transform
Expand Down Expand Up @@ -109,6 +110,11 @@ def extract_tasks(
The weight (i.e. the number of appearance) of extracted tasks
"""
# pylint: disable=import-outside-toplevel
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)

target, target_host = Target.check_and_update_host_consist(target, target_host)

Expand Down
6 changes: 6 additions & 0 deletions python/tvm/autotvm/task/relay_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
"""
import threading
import logging
import warnings

import tvm
from tvm.autotvm.task.dispatcher import DispatchContext, FallbackContext
Expand Down Expand Up @@ -77,6 +78,11 @@ def extract_from_program(mod, params, target, target_host=None, ops=None):
task: Array of autotvm.task.Task
collected tasks
"""
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
target, target_host = Target.check_and_update_host_consist(target, target_host)
return extract_from_multiple_program([mod], [params], target, ops=ops)

Expand Down
9 changes: 8 additions & 1 deletion python/tvm/driver/build_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

# pylint: disable=invalid-name
"""The build utils in python."""
import warnings

from typing import Union, Optional, List, Mapping

Expand Down Expand Up @@ -206,7 +207,7 @@ def build(
s2 = topi.cuda.schedule_injective(cuda_tgt, [C])
m1 = tvm.lower(s1, [A, B, C], name="test_add1")
m2 = tvm.lower(s2, [A, B, C], name="test_add2")
rt_mod = tvm.build({"llvm": m1, "cuda": m2}, target_host="llvm")
rt_mod = tvm.build({"llvm": m1, "cuda": m2})
Note
----
Expand All @@ -229,6 +230,12 @@ def build(
f"but got {type(inputs)}."
)

if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)

if not isinstance(inputs, (dict, container.Map)):
target = Target.current() if target is None else target
target = target if target else "llvm"
Expand Down
17 changes: 17 additions & 0 deletions python/tvm/relay/backend/vm.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@
Implements a Python interface to compiling and executing on the Relay VM.
"""
import warnings

import numpy as np

import tvm
Expand Down Expand Up @@ -63,6 +65,11 @@ def compile(mod, target=None, target_host=None, params=None):
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
target, target_host = Target.check_and_update_host_consist(
target, target_host, target_is_dict_key=False
)
Expand Down Expand Up @@ -132,6 +139,11 @@ def lower(self, mod, target=None, target_host=None):
By default, llvm is used if it is enabled,
otherwise a stackvm intepreter is used.
"""
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
target = self._update_target(target)
target_host = self._update_target_host(target, target_host)
target, target_host = Target.check_and_update_host_consist(
Expand Down Expand Up @@ -173,6 +185,11 @@ def optimize(self, mod, target=None, target_host=None, params=None):
params : dict
The parameters of the final module.
"""
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
target = self._update_target(target)
target_host = self._update_target_host(target, target_host)
target, target_host = Target.check_and_update_host_consist(
Expand Down
20 changes: 16 additions & 4 deletions python/tvm/relay/build_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,6 +148,11 @@ def build(
params : dict
The parameters of the final graph.
"""
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
target = build_target_by_device_type_map(target)
target, target_host = Target.check_and_update_host_consist(
target, target_host, target_is_dict_key=False
Expand Down Expand Up @@ -332,16 +337,23 @@ def build(ir_mod, target=None, target_host=None, params=None, mod_name="default"
"instead of deprecated parameter mod (tvm.relay.function.Function)",
DeprecationWarning,
)

if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)

target, target_host = Target.check_and_update_host_consist(
target, target_host, target_is_dict_key=False
)

target = build_target_by_device_type_map(target)
if isinstance(target_host, (str, Target)):
target_host = Target(target_host)
elif target_host:
raise ValueError("target host must be the type of str, " + "tvm.target.Target, or None")

target, target_host = Target.check_and_update_host_consist(
target, target_host, target_is_dict_key=False
)

# Retrieve the executor from the target
executor = get_executor_from_target(target, target_host)

Expand Down
2 changes: 1 addition & 1 deletion rust/tvm/tests/basics/src/tvm_add.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def main(target, out_dir):
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))

fadd = tvm.build(s, [A, B, C], target, target_host="llvm", name="myadd")
fadd = tvm.build(s, [A, B, C], tvm.target.Target(target, host="llvm"), name="myadd")
fadd.save(osp.join(out_dir, "test_add.o"))
if target == "cuda":
fadd.imported_modules[0].save(osp.join(out_dir, "test_add.ptx"))
Expand Down
2 changes: 1 addition & 1 deletion tests/python/contrib/test_bnns/infrastructure.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ def build_module(mod, target, params=None, enable_bnns=True, tvm_ops=0):
if enable_bnns:
mod = partition_for_bnns(mod)
relay.backend.te_compiler.get().clear()
return relay.build(mod, target=target, target_host=target, params=params)
return relay.build(mod, target=target, params=params)


def build_and_run(
Expand Down
2 changes: 1 addition & 1 deletion tests/python/contrib/test_bnns/test_onnx_topologies.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ def run(mod, target, simplify=True, with_bnns=False):
mod = simplify_model(mod)
if with_bnns:
mod = partition_for_bnns(mod)
graph_module = relay.build(mod, target=target, target_host=target, params=params)
graph_module = relay.build(mod, target=target, params=params)

lib_name = "deploy.tar"
path_dso = temp.relpath(lib_name)
Expand Down
4 changes: 3 additions & 1 deletion tests/python/contrib/test_hexagon/infrastructure.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,9 @@ def get_packed_filter_layout(out_channel, in_channel, kernel_h, kernel_w):
def build_and_run(inputs, func, target, target_host, *args, **kwargs):
schedule, placeholders, binds = func(*args, **kwargs)

func = tvm.build(schedule, placeholders, target=target, target_host=target_host, binds=binds)
func = tvm.build(
schedule, placeholders, target=tvm.target.Target(target, host=target_host), binds=binds
)
dev = tvm.device(target)
tensors = []
for tensor in inputs:
Expand Down
Loading

0 comments on commit 137def8

Please sign in to comment.