Skip to content

Commit

Permalink
[Unity] Allocate workspace for all functions (apache#15118)
Browse files Browse the repository at this point in the history
Allocate workspace for all functions
  • Loading branch information
yelite authored and junrushao committed Jun 22, 2023
1 parent 54a7b25 commit d87ef34
Show file tree
Hide file tree
Showing 2 changed files with 45 additions and 7 deletions.
16 changes: 11 additions & 5 deletions src/relax/transform/allocate_workspace.cc
Original file line number Diff line number Diff line change
Expand Up @@ -125,11 +125,17 @@ class WorkspaceProvider : ExprMutator {
builder_->GetContextIRModule()->Remove(GetRef<GlobalVar>(gvar));
}

auto gvar = mod_->GetGlobalVar("main");
auto func = Downcast<Function>(mod_->Lookup(gvar));
auto new_func = Function(func->params, VisitExpr(func->body), func->ret_struct_info,
func->is_pure, func->attrs);
builder_->UpdateFunction(gvar, new_func);
for (const auto& [gvar, f] : mod_->functions) {
workspace_var_main_ = Var();
if (!f->IsInstance<relax::FunctionNode>() || f->GetAttr<String>(attr::kCodegen) ||
f->GetAttr<String>(attr::kComposite)) {
continue;
}
auto func = Downcast<Function>(mod_->Lookup(gvar));
auto new_func = Function(func->params, VisitExpr(func->body), func->ret_struct_info,
func->is_pure, func->attrs);
builder_->UpdateFunction(gvar, new_func);
}
return builder_->GetContextIRModule();
}

Expand Down
36 changes: 34 additions & 2 deletions tests/python/relax/test_transform_allocate_workspace.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def gv(
return gv1

@R.function
def main(
def entry_a(
q: R.Tensor((32, 8, 16, 8), dtype="float16"),
k: R.Tensor((32, 8, 16, 8), dtype="float16"),
v: R.Tensor((32, 8, 16, 8), dtype="float16"),
Expand All @@ -68,6 +68,20 @@ def main(
R.output(gv)
return gv

@R.function
# entry_b: a second public entry point in the input Module. The point of this
# test fixture is that the AllocateWorkspace pass must rewrite *all* entry
# functions, not only one named "main" — entry_b exercises that by calling the
# same fused attention kernel as entry_a and then offsetting the result.
def entry_b(
q: R.Tensor((32, 8, 16, 8), dtype="float16"),
k: R.Tensor((32, 8, 16, 8), dtype="float16"),
v: R.Tensor((32, 8, 16, 8), dtype="float16"),
) -> R.Tensor((32, 8, 16, 8), dtype="float16"):
cls = Module
with R.dataflow():
# Call the fused attention function (presumably marked with a
# Codegen/Composite attribute so the pass skips rewriting it directly —
# confirm against the full test file) and add a constant so the call
# result is consumed inside the dataflow block.
gv: R.Tensor((32, 8, 16, 8), dtype="float16") = cls.fused_relax_nn_attention_cutlass(
q, k, v
) + R.const(1, dtype="float16")
R.output(gv)
return gv


@I.ir_module
class Expected:
Expand Down Expand Up @@ -105,7 +119,7 @@ def gv(
return gv1

@R.function
def main(
def entry_a(
q: R.Tensor((32, 8, 16, 8), dtype="float16"),
k: R.Tensor((32, 8, 16, 8), dtype="float16"),
v: R.Tensor((32, 8, 16, 8), dtype="float16"),
Expand All @@ -122,6 +136,24 @@ def main(
R.output(gv)
return gv

@R.function
# entry_b (Expected): the post-pass form of Module.entry_b. Demonstrates that
# AllocateWorkspace inserts a workspace allocation into every entry function,
# not just "main": storage + tensor allocation are materialized here and the
# call is redirected to the workspace-taking variant of the kernel.
def entry_b(
q: R.Tensor((32, 8, 16, 8), dtype="float16"),
k: R.Tensor((32, 8, 16, 8), dtype="float16"),
v: R.Tensor((32, 8, 16, 8), dtype="float16"),
) -> R.Tensor((32, 8, 16, 8), dtype="float16"):
cls = Expected
with R.dataflow():
# Allocate 65536 bytes of raw uint8 storage, then view it as a flat
# uint8 tensor to serve as the kernel's scratch workspace.
lv: R.Object = R.vm.alloc_storage(R.shape([65536]), R.prim_value(0), R.dtype("uint8"))
workspace_main: R.Tensor((65536,), dtype="uint8") = R.vm.alloc_tensor(
lv, R.prim_value(0), R.shape([65536]), R.dtype("uint8")
)
# The rewritten callee (suffixed "1") takes the workspace as an extra
# trailing argument; the surrounding arithmetic is unchanged.
gv: R.Tensor((32, 8, 16, 8), dtype="float16") = cls.fused_relax_nn_attention_cutlass1(
q, k, v, workspace_main
) + R.const(1, dtype="float16")
R.output(gv)
return gv


def test_single_attention():
rewritten = relax.transform.AllocateWorkspace()(Module)
Expand Down

0 comments on commit d87ef34

Please sign in to comment.