Skip to content

Commit

Permalink
[Relay] Remove FTVMCompute from TNonComputational ops (apache#9334)
Browse files Browse the repository at this point in the history
* remove FTVMCompute from non-computational ops

* Remove injective schedule registration for on_device since it is non-computational

* lint
  • Loading branch information
electriclilies authored and ylc committed Jan 7, 2022
1 parent c400ba5 commit b695480
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 39 deletions.
3 changes: 0 additions & 3 deletions python/tvm/relay/op/_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,9 +89,6 @@
register_broadcast_schedule("fast_exp")
register_broadcast_schedule("fast_tanh")
register_broadcast_schedule("fast_erf")
# a fake on_device schedule.
# this will not be used in actual computation
register_injective_schedule("on_device")


# zeros
Expand Down
7 changes: 1 addition & 6 deletions src/relay/op/annotation/annotation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -94,12 +94,7 @@ RELAY_REGISTER_OP("on_device")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_type) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<TNonComputational>("TNonComputational", true);

OnDeviceProps GetOnDeviceProps(const CallNode* call_node) {
if (call_node->op == OnDeviceOp()) {
Expand Down
21 changes: 3 additions & 18 deletions src/relay/op/memory/memory.cc
Original file line number Diff line number Diff line change
Expand Up @@ -91,12 +91,7 @@ RELAY_REGISTER_OP("memory.alloc_storage")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_dtype) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);

Expr AllocTensor(Expr storage, Expr offset, Expr shape, DataType dtype,
Array<IndexExpr> assert_shape) {
Expand Down Expand Up @@ -206,12 +201,7 @@ RELAY_REGISTER_OP("memory.alloc_tensor")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_dtype) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);

bool KillRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
Expand All @@ -230,12 +220,7 @@ RELAY_REGISTER_OP("memory.kill")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_dtype) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);

static void FlattenTupleTypeAux(const Type& type, std::vector<TensorType>* out) {
if (auto tt = type.as<TensorTypeNode>()) {
Expand Down
14 changes: 2 additions & 12 deletions src/relay/op/vm/vm.cc
Original file line number Diff line number Diff line change
Expand Up @@ -138,12 +138,7 @@ RELAY_REGISTER_OP("vm.shape_func")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_dtype) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);

// vm.invoke_tvm_op
bool InvokeTVMOpRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
Expand Down Expand Up @@ -188,12 +183,7 @@ RELAY_REGISTER_OP("vm.invoke_tvm_op")
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_dtype) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);

// vm.reshape
TVM_REGISTER_NODE_TYPE(ReshapeTensorAttrs);
Expand Down

0 comments on commit b695480

Please sign in to comment.