[TEST] Fix the broken VNNI MetaSchedule test (apache#13067)
* fixed MS vnni template-based tuning test

* enable local testing of dp4a auto tensorize

* fix rocm and vk auto-tensorize test

* tweaking to see why IS_IN_CI isn't working

* skip dp4a auto tensorize test since IS_IN_CI is not working

* fix broken hexagon test after onnx model update
masahi authored and xinetzone committed Nov 25, 2022
1 parent 2556831 commit 0c7d354
Showing 3 changed files with 16 additions and 15 deletions.
4 changes: 2 additions & 2 deletions tests/python/contrib/test_hexagon/test_models.py
@@ -51,7 +51,7 @@ def test_mobilenet(hexagon_session: Session):

data_in = np.random.rand(1, 3, 224, 224).astype(dtype=dtype)

input_name = "input"
input_name = "data"
shape_dict = {input_name: data_in.shape}
relay_mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True)
inputs = {input_name: data_in}
@@ -98,7 +98,7 @@ def test_mobilenet_aot(hexagon_session: Session, aot_host_target, aot_target, en

data_in = np.random.rand(1, 3, 224, 224).astype(dtype=dtype)

input_name = "input"
input_name = "data"
shape_dict = {input_name: data_in.shape}
relay_mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True)
inputs = {input_name: data_in}
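Context for the two input_name hunks above: per the commit message, the mobilenet ONNX model used by these tests was updated and its graph input is now called "data", so the hard-coded name had to follow. As a hedged aside, not part of this commit, the input name can also be read from the model itself so the test survives future renames; first_graph_input below is an illustrative helper, and onnx_model is assumed to be the onnx.ModelProto these tests already load.

import onnx


def first_graph_input(onnx_model: onnx.ModelProto) -> str:
    """Return the name of the first true graph input, skipping weight initializers."""
    initializer_names = {init.name for init in onnx_model.graph.initializer}
    for value_info in onnx_model.graph.input:
        if value_info.name not in initializer_names:
            return value_info.name
    raise ValueError("model has no non-initializer inputs")


# Usage mirroring the tests above (shape and dtype taken from the test body):
# input_name = first_graph_input(onnx_model)
# shape_dict = {input_name: data_in.shape}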
16 changes: 8 additions & 8 deletions tests/python/integration/test_auto_tensorize.py
@@ -283,10 +283,10 @@ def test_dp4a_dense():
_test_dense("int8", SCH_RULES_FOR_DP4A, POSTPROCS_FOR_DP4A, "nvidia/geforce-rtx-3070")
# Uncomment to test on vulkan or rocm target
# _test_dense(
# "int8", sch_rules_for_dp4a, postprocs_for_dp4a, "vulkan -from_device=0"
# "int8", SCH_RULES_FOR_DP4A, POSTPROCS_FOR_DP4A, "vulkan -from_device=0"
# )
# _test_dense(
# "int8", sch_rules_for_sdot4, postprocs_for_dp4a, "rocm"
# "int8", SCH_RULES_FOR_SDOT4, POSTPROCS_FOR_DP4A, "rocm"
# )


@@ -303,10 +303,10 @@ def test_dp4a_conv2d():
_test_conv2d("int8", SCH_RULES_FOR_DP4A, POSTPROCS_FOR_DP4A, "nvidia/geforce-rtx-3070")
# Uncomment to test on vulkan or rocm target
# _test_conv2d(
# "int8", sch_rules_for_dp4a, postprocs_for_dp4a, "vulkan -from_device=0"
# "int8", SCH_RULES_FOR_DP4A, POSTPROCS_FOR_DP4A, "vulkan -from_device=0"
# )
# _test_conv2d(
# "int8", sch_rules_for_sdot4, postprocs_for_dp4a, "rocm"
# "int8", SCH_RULES_FOR_SDOT4, POSTPROCS_FOR_DP4A, "rocm"
# )


@@ -342,16 +342,16 @@ def test_dp4a_bert_int8():
# params,
# input_info,
# "vulkan -from_device=0",
-# sch_rules_for_dp4a,
-# postprocs_for_dp4a,
+# SCH_RULES_FOR_DP4A,
+# POSTPROCS_FOR_DP4A,
# )
# _test_bert_int8(
# relay_mod,
# params,
# input_info,
# "rocm",
-# sch_rules_for_sdot4,
-# postprocs_for_dp4a,
+# SCH_RULES_FOR_SDOT4,
+# POSTPROCS_FOR_DP4A,
# )


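All changes in this file rename the lowercase identifiers inside the commented-out vulkan/rocm calls to the module-level constants (SCH_RULES_FOR_DP4A, SCH_RULES_FOR_SDOT4, POSTPROCS_FOR_DP4A), so they actually run when uncommented for local testing. The commit message also notes that the dp4a auto-tensorize test is skipped because IS_IN_CI is not working; as a hedged illustration only, not TVM's actual helper, the usual pytest idiom for that kind of gate looks like this:

import os

import pytest

# Assumption: CI is detected via an environment variable; TVM's real IS_IN_CI
# helper lives in its test utilities and may work differently.
IS_IN_CI = os.environ.get("CI", "").lower() == "true"


@pytest.mark.skipif(IS_IN_CI, reason="dp4a auto-tensorization requires a local GPU")
def test_dp4a_dense_local_only():
    # Locally, one would call the now-correct uncommented variants from the diff, e.g.:
    # _test_dense("int8", SCH_RULES_FOR_DP4A, POSTPROCS_FOR_DP4A, "vulkan -from_device=0")
    pass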
11 changes: 6 additions & 5 deletions tests/python/unittest/test_meta_schedule_vnni_integration.py
@@ -41,7 +41,7 @@ def _schedule_dense(m: Optional[int], do_tune: bool):
"""

def schedule_fn(sch, dense_block: Optional[BlockRV] = None) -> bool:
if "dense" not in sch.mod.attrs["task_name"]:
if sch.mod.attrs is not None and "dense" not in sch.mod.attrs["task_name"]:
return False
if dense_block is None:
dense_block = sch.get_block("compute")
@@ -204,7 +204,7 @@ def schedule_rule_dense_vnni(sch: Schedule, dense_block: BlockRV):
with tempfile.TemporaryDirectory() as work_dir:
# postprocs=lambda: [] is important to prevent default post processors from
# tampering with the manual schedule.
-tasks = ms.relay_integration.extracted_tasks_to_tune_contexts(
+tasks, weights = ms.relay_integration.extracted_tasks_to_tune_contexts(
list(
filter(
lambda task: "dense" in task.task_name,
@@ -214,15 +214,16 @@ def schedule_rule_dense_vnni(sch: Schedule, dense_block: BlockRV):
work_dir=work_dir,
space=ms.space_generator.PostOrderApply(
f_block_filter=None,
-sch_rules=None,
+sch_rules="from-target",
postprocs=[],
-mutator_probs=None,
+mutator_probs="from-target",
),
)
database = ms.relay_integration.tune_tasks(
tasks=tasks,
-task_weights=[1.0] * len(tasks),
+task_weights=weights,
work_dir=work_dir,
+max_trials_per_task=32,
max_trials_global=20000,
)
with database, tvm.transform.PassContext(
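The last hunk unpacks both the tuning contexts and their weights from extracted_tasks_to_tune_contexts, forwards the weights to tune_tasks, requests the default "from-target" schedule rules and mutator probabilities, and caps trials per task. For orientation only, here is a minimal sketch of how such a database is then consumed; the truncated "with database, tvm.transform.PassContext(" line above continues in the real test, and the body below is an assumption about that typical pattern, with relay_mod, params, and target taken from earlier in the file.

import tvm
from tvm import relay


def build_with_database(database, relay_mod, target, params):
    # Compile Relay with tuned schedules looked up from the MetaSchedule database.
    with database, tvm.transform.PassContext(
        opt_level=3,
        config={"relay.backend.use_meta_schedule": True},
    ):
        return relay.build(relay_mod, target=target, params=params)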
