Addressing comments - 2
Giuseppe Rossini committed Dec 10, 2020
1 parent bb39526 commit a51c072
Showing 3 changed files with 65 additions and 29 deletions.
91 changes: 64 additions & 27 deletions python/tvm/driver/tvmc/autotuner.py
@@ -156,6 +156,36 @@ def add_tune_parser(subparsers):
         default=16,
         help="the width of vector units in bytes",
     )
+    auto_scheduler_group.add_argument(
+        "--max-shared-memory-per-block",
+        type=int,
+        default=0,
+        help="the max shared memory per block in bytes",
+    )
+    auto_scheduler_group.add_argument(
+        "--max-local-memory-per-block",
+        type=int,
+        default=0,
+        help="the max local memory per block in bytes",
+    )
+    auto_scheduler_group.add_argument(
+        "--max-threads-per-block",
+        type=int,
+        default=0,
+        help="the max number of threads per block",
+    )
+    auto_scheduler_group.add_argument(
+        "--max-vthread-extent",
+        type=int,
+        default=0,
+        help="the max vthread extent",
+    )
+    auto_scheduler_group.add_argument(
+        "--warp-size",
+        type=int,
+        default=0,
+        help="the thread numbers of a warp",
+    )
     auto_tuning_group = parser.add_argument_group(
         "Autotuning options",
         "Autotuning options, used when the autoscheduler is not enabled",
@@ -206,28 +236,6 @@ def drive_tune(args):
     min_repeat_ms = 0 if target.keys[0] == "cpu" else 1000
     logger.debug("Default --min-repeat-ms for this target is %s", min_repeat_ms)
 
-    if args.enable_autoscheduler:
-        # Specify hardware parameters
-        hardware_params = auto_scheduler.HardwareParams(
-            args.num_cores, args.vector_unit_bytes, args.cache_line_bytes, 0, 0, 0, 0, 0
-        )
-        tasks, weights = autoscheduler_get_tuning_tasks(
-            mod=mod,
-            params=params,
-            target=target,
-            target_host=args.target_host,
-            alter_layout=args.desired_layout,
-            hardware_params=hardware_params,
-        )
-    else:
-        tasks = autotuner_get_tuning_tasks(
-            mod=mod,
-            params=params,
-            target=target,
-            target_host=args.target_host,
-            alter_layout=args.desired_layout,
-        )
-
     if args.rpc_tracker:
         runner_ctor = auto_scheduler.RPCRunner if args.enable_autoscheduler else autotvm.RPCRunner
         runner = runner_ctor(
@@ -253,6 +261,26 @@
         )
 
     if args.enable_autoscheduler:
+        # Specify hardware parameters
+        hardware_params = auto_scheduler.HardwareParams(
+            args.num_cores,
+            args.vector_unit_bytes,
+            args.cache_line_bytes,
+            args.max_shared_memory_per_block,
+            args.max_local_memory_per_block,
+            args.max_threads_per_block,
+            args.max_vthread_extent,
+            args.warp_size,
+        )
+        tasks, weights = autoscheduler_get_tuning_tasks(
+            mod=mod,
+            params=params,
+            target=target,
+            target_host=args.target_host,
+            alter_layout=args.desired_layout,
+            hardware_params=hardware_params,
+        )
+
         # Create the autoscheduler tuning options
         tuning_options = auto_scheduler.TuningOptions(
             num_measure_trials=args.trials,
@@ -268,8 +296,15 @@
             tuning_options,
             args.tuning_records,
         )
-
     else:
+        tasks = autotvm_get_tuning_tasks(
+            mod=mod,
+            params=params,
+            target=target,
+            target_host=args.target_host,
+            alter_layout=args.desired_layout,
+        )
+
         tuning_option = {
             "tuner": args.tuner,
             "trials": args.trials,
@@ -284,8 +319,8 @@
         tune_tasks(tasks, args.output, **tuning_option)
 
 
-def autotuner_get_tuning_tasks(mod, params, target, target_host=None, alter_layout=None):
-    """Get the tuning tasks for a given relay module.
+def autotvm_get_tuning_tasks(mod, params, target, target_host=None, alter_layout=None):
+    """Get the autotvm tuning tasks for a given relay module.
 
     Parameters
     ----------
@@ -323,7 +358,7 @@ def autotuner_get_tuning_tasks(mod, params, target, target_host=None, alter_layout=None):
 def autoscheduler_get_tuning_tasks(
     mod, params, target, target_host=None, alter_layout=None, hardware_params=None
 ):
-    """Get the tuning tasks for a given relay module.
+    """Get the autoscheduler tuning tasks for a given relay module.
 
     Parameters
     ----------
@@ -339,6 +374,8 @@ def autoscheduler_get_tuning_tasks(
         The layout to convert the graph to. Note, the convert layout
         pass doesn't currently guarantee the whole of the graph will
         be converted to the chosen layout.
+    hardware_params : Optional[HardwareParams]
+        Hardware parameters used for the search tasks
 
     Returns
     -------
@@ -373,7 +410,7 @@ def schedule_tasks(
         A list of auto_scheduler.SearchTask to tune.
     task_weights : list
         The weight (i.e. the number of appearance) of extracted tasks
-    tuning_options:
+    tuning_options: dict
         The options of tuning
     tuning_records : str, optional
         The json file used to preload the autoscheduler
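
Note on the new hardware flags: they map one-to-one onto the positional arguments of auto_scheduler.HardwareParams, exactly as the drive_tune() hunk above shows, and each of them defaults to 0 when not given on the command line. As a minimal standalone sketch, this is how the same object could be built directly in Python; the concrete numbers are illustrative placeholders for a typical CUDA device, not values taken from this commit:

    from tvm import auto_scheduler

    # Positional order matches the call in drive_tune() above.
    hardware_params = auto_scheduler.HardwareParams(
        4,      # num_cores
        16,     # vector_unit_bytes
        64,     # cache_line_bytes
        49152,  # max_shared_memory_per_block (48 KiB, placeholder)
        65536,  # max_local_memory_per_block (placeholder)
        1024,   # max_threads_per_block (placeholder)
        8,      # max_vthread_extent (placeholder)
        32,     # warp_size (placeholder)
    )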
1 change: 0 additions & 1 deletion python/tvm/driver/tvmc/compiler.py
@@ -184,7 +184,6 @@ def compile_model(
         logger.debug("tuning records file provided: %s", tuning_records)
 
         use_autoscheduler = True
-
         try:
             auto_scheduler.load_records(tuning_records)
         except tvm._ffi.base.TVMError:
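
Note: the single deleted line here is a blank line inside compile_model()'s record-format probe, which distinguishes autoscheduler tuning records from autotvm ones by attempting to parse them. A condensed sketch of that pattern; the helper name and the fallback assignment in the except branch are assumptions based on the use_autoscheduler flag visible in the context lines above:

    import tvm
    from tvm import auto_scheduler

    def records_are_autoscheduler(tuning_records):
        # Hypothetical helper; the commit performs this check inline in compile_model().
        use_autoscheduler = True
        try:
            # Autoscheduler records load cleanly; other formats raise a TVMError.
            auto_scheduler.load_records(tuning_records)
        except tvm._ffi.base.TVMError:
            use_autoscheduler = False  # assumed fallback branch, not shown in the diff
        return use_autoscheduler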
2 changes: 1 addition & 1 deletion tests/python/driver/tvmc/test_autotuner.py
@@ -27,7 +27,7 @@
 
 def _get_tasks(model):
     mod, params = tvmc.frontends.load_model(model)
-    return tvmc.autotuner.get_tuning_tasks(mod, params, "llvm")
+    return tvmc.autotuner.autotvm_get_tuning_tasks(mod, params, "llvm")
 
 
 def _get_measure_options():
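
Note: after the rename, callers resolve the autotvm task extractor as tvmc.autotuner.autotvm_get_tuning_tasks, mirroring autoscheduler_get_tuning_tasks. A sketch of equivalent standalone usage, with a placeholder model filename:

    from tvm.driver import tvmc

    # Load a model through the tvmc frontends, then extract autotvm tasks for an LLVM target.
    mod, params = tvmc.frontends.load_model("model.onnx")  # placeholder path
    tasks = tvmc.autotuner.autotvm_get_tuning_tasks(mod, params, "llvm")
    print("extracted %d tunable tasks" % len(tasks))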
