Fix a few OpNode argument field descriptions when registered (apache#…
domin1985 authored Dec 21, 2020
1 parent 82942fb commit 53c0641
Showing 8 changed files with 34 additions and 3 deletions.
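The pattern touched throughout: each operator registration declares its arity with set_num_inputs and documents each input with add_argument, and the two had drifted apart for the ops below. A minimal sketch of how such drift can be checked from C++, assuming TVM's Op registry API (Op::Get plus the OpNode fields num_inputs and arguments); CheckOpArgumentDocs is a hypothetical helper, not part of TVM:

    #include <tvm/ir/op.h>

    // Warn when an op's declared arity disagrees with the number of
    // registered argument descriptions: the inconsistency this commit fixes.
    void CheckOpArgumentDocs(const tvm::String& name) {
      const tvm::Op& op = tvm::Op::Get(name);
      // num_inputs is -1 for ops with a variable number of inputs; skip those.
      if (op->num_inputs >= 0 &&
          static_cast<size_t>(op->num_inputs) != op->arguments.size()) {
        LOG(WARNING) << name << ": set_num_inputs(" << op->num_inputs
                     << ") but " << op->arguments.size()
                     << " add_argument() descriptions";
      }
    }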
6 changes: 6 additions & 0 deletions src/relay/op/annotation/annotation.cc
@@ -50,6 +50,7 @@ TVM_REGISTER_GLOBAL("relay.op.annotation._make.on_device")
 RELAY_REGISTER_OP("on_device")
     .describe(R"code(Annotate an expression with device type)code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
@@ -117,6 +118,7 @@ RELAY_REGISTER_OP("annotation.bitpack_start")
 Mark the start of bitpacking.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
@@ -133,6 +135,7 @@ RELAY_REGISTER_OP("annotation.bitpack_end")
 Mark the end of bitpacking.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
@@ -155,6 +158,7 @@ Mark a checkpoint for checkpointing memory optimization.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
     .set_support_level(10)
+    .add_argument("data", "Tensor", "The input data.")
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
     .set_attr<TOpIsStateful>("TOpIsStateful", false)
@@ -176,6 +180,7 @@ RELAY_REGISTER_OP("annotation.compiler_begin")
 Beginning of a region that is handled by a given compiler.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
@@ -200,6 +205,7 @@ RELAY_REGISTER_OP("annotation.compiler_end")
 End of a region that is handled by a given compiler.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
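Every op in this file is typed with add_type_rel("Identity", IdentityRel), i.e. the output type simply mirrors the single input. A simplified sketch of that relation, modeled on TVM's src/relay/op/type_relations.cc (signature and types assumed from that file):

    // The type solver passes the input types followed by the output type;
    // an identity op constrains each remaining type to equal the first input.
    bool IdentityRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                     const TypeReporter& reporter) {
      for (size_t i = 1; i < types.size(); ++i) {
        reporter->Assign(types[i], types[0]);
      }
      return true;
    }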
1 change: 1 addition & 0 deletions src/relay/op/device_copy.cc
@@ -57,6 +57,7 @@ Copy data from one tensor to another. The source and destination might be
 on different devices.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
3 changes: 2 additions & 1 deletion src/relay/op/image/grid_sample.cc
@@ -149,7 +149,7 @@ grid_sample often cooperates with affine_grid which generates sampling grids for
             (batch_size, channels, in_height, in_width) for NCHW
             (batch_size, in_height, in_width, channels) for NHWC
-- **grid**: out is 4D array of shape [batch, 2, out_height, out_width], where each vector
+- **grid**: grid is 4D array of shape [batch, 2, out_height, out_width], where each vector
   :math:`out[b, :, h, w]` represents the coordinate :math:`(x, y)`
 - **out**: out is 4D array of shape
@@ -160,6 +160,7 @@ grid_sample often cooperates with affine_grid which generates sampling grids for
     .set_num_inputs(2)
     .set_attrs_type<GridSampleAttrs>()
     .add_argument("data", "Tensor", "The input tensor.")
+    .add_argument("grid", "Tensor", "The grid tensor.")
     .set_support_level(5)
     .add_type_rel("GridSample", GridSampleRel)
     .set_attr<TOpPattern>("TOpPattern", kInjective);
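As a concrete shape example: NCHW data of shape (1, 3, 32, 32) sampled with a grid of shape (1, 2, 8, 8) yields out of shape (1, 3, 8, 8); the batch and channel extents come from data, the spatial extent from the grid.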
2 changes: 1 addition & 1 deletion src/relay/op/memory/memory.cc
@@ -206,7 +206,7 @@ bool KillRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 
 RELAY_REGISTER_OP("memory.kill")
     .describe(R"code(Mark a tensor for release to the allocator.)code" TVM_ADD_FILELINE)
-    .set_num_inputs(3)
+    .set_num_inputs(1)
     .add_argument("to_free", "Tensor", "The tensor to free.")
     .add_type_rel("Kill", KillRel)
     .set_support_level(10)
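Note the arity fix here: memory.kill releases exactly one tensor, so set_num_inputs(1) now agrees with the single registered "to_free" argument.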
2 changes: 2 additions & 0 deletions src/relay/op/nn/pooling.cc
@@ -765,6 +765,7 @@ RELAY_REGISTER_OP("nn.max_pool2d_grad")
     .set_attrs_type<MaxPool2DAttrs>()
     .set_num_inputs(2)
     .add_argument("data", "Tensor", "The input tensor.")
+    .add_argument("grad", "Tensor", "The grad tensor.")
     .set_support_level(2)
     .add_type_rel("MaxPool2DGrad", Pool2DGradRel)
     .set_attr<FTVMCompute>("FTVMCompute", Pool2DGradCompute<MaxPool2DAttrs, topi::nn::kMaxPool>);
@@ -812,6 +813,7 @@ RELAY_REGISTER_OP("nn.avg_pool2d_grad")
     .set_attrs_type<MaxPool2DAttrs>()
     .set_num_inputs(2)
     .add_argument("data", "Tensor", "The input tensor.")
+    .add_argument("grad", "Tensor", "The grad tensor.")
     .set_support_level(2)
     .add_type_rel("MaxPool2DGrad", Pool2DGradRel)
     .set_attr<FTVMCompute>("FTVMCompute", Pool2DGradCompute<AvgPool2DAttrs, topi::nn::kAvgPool>);
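Both pooling gradient ops are registered with two inputs, the forward input and the incoming gradient, so the added "grad" entry documents the second input that previously had no description.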
7 changes: 7 additions & 0 deletions src/relay/op/tensor/transform.cc
@@ -1041,6 +1041,7 @@ Given data with shape (Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1}) and indices with sh
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(2)
     .add_argument("data", "Tensor", "The input tensor.")
+    .add_argument("indices", "Tensor", "The indices tensor.")
     .set_support_level(3)
     .add_type_rel("ScatterND", ScatterNDRel)
     .set_attr<TOpPattern>("TOpPattern", kInjective);
@@ -1388,6 +1389,9 @@ RELAY_REGISTER_OP("arange")
 )code" TVM_ADD_FILELINE)
     .set_attrs_type<ArangeAttrs>()
     .set_num_inputs(3)
+    .add_argument("start", "Expr", "Start of interval. The interval includes this value.")
+    .add_argument("end", "Expr", "Stop of interval. The interval does not include this value.")
+    .add_argument("step", "Expr", "Spacing between values.")
     .set_support_level(3)
     .add_type_rel("Arange", ArangeRel)
     .set_attr<FTVMCompute>("FTVMCompute", ArangeCompute)
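arange takes start, end, and step as scalar expressions (inputs rather than attributes, which is what lets them be computed at runtime); for example start=1, end=7, step=2 produces [1, 3, 5], i.e. ceil((end - start) / step) = 3 elements.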
@@ -3030,6 +3034,7 @@ output shape will simply be (Y_0, ..., Y_{K-1}).
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(2)
     .add_argument("data", "Tensor", "The input tensor.")
+    .add_argument("indices", "Tensor", "The indices of values to gather.")
     .set_support_level(3)
     .add_type_rel("GatherND", GatherNDRel)
     .set_attr<FTVMCompute>("FTVMCompute", GatherNDCompute)
@@ -3260,6 +3265,8 @@ Example::
 - unravel_index([22, 41, 37], (7, 6)) = [[3, 6, 6], [4, 5, 1]]
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(2)
+    .add_argument("data", "Tensor", "The input tensor.")
+    .add_argument("shape", "Tensor", "The shape tensor.")
     .set_support_level(3)
     .add_type_rel("UnRavelIndexRel", UnRavelIndexRel)
     .set_attr<FTVMCompute>("FTVMCompute", UnRavelIndexCompute)
4 changes: 3 additions & 1 deletion src/relay/op/vm/vm.cc
@@ -121,7 +121,9 @@ bool ShapeFuncRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 RELAY_REGISTER_OP("vm.shape_func")
     .describe(R"code(Get the shape of a tensor.)code" TVM_ADD_FILELINE)
     .set_num_inputs(3)
-    .add_argument("tensor", "Tensor", "The tensor to retrieve the shape for.")
+    .add_argument("func", "Function", "The operation to call")
+    .add_argument("ins", "Tuple", "The input tensors.")
+    .add_argument("outs", "Tuple", "The output tensors.")
     .add_type_rel("ShapeFuncRel", ShapeFuncRel)
     .set_support_level(10)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
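The old lone "tensor" description never matched the declared arity of three; the replacement documents the actual inputs: the shape function to invoke plus the tuples of input and output tensors.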
12 changes: 12 additions & 0 deletions src/target/source/intrin_rule_cuda.cc
@@ -186,18 +186,30 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.cuda.fmod").set_body(DispatchPureExtern<CUD
 // TODO(tvm-team): consider make CUDA its own subfolder and create a file for low-level builtins.
 TVM_REGISTER_OP("tir.cuda.__shfl_sync")
     .set_num_inputs(4)
+    .add_argument("mask", "Expr", "The thread mask.")
+    .add_argument("var", "Expr", "The variable to sync.")
+    .add_argument("lane", "Expr", "The source thread id.")
+    .add_argument("width", "Expr", "The warp thread width, must be a power of 2.")
     .set_attr<TGlobalSymbol>("TGlobalSymbol", "__shfl_sync")
     .set_attr<TCallEffectKind>("TCallEffectKind", Integer(CallEffectKind::kOpaque))
     .set_attr<bool>("cuda.need_warp_shuffle", true);
 
 TVM_REGISTER_OP("tir.cuda.__shfl_up_sync")
     .set_num_inputs(4)
+    .add_argument("mask", "Expr", "The thread mask.")
+    .add_argument("var", "Expr", "The variable to sync.")
+    .add_argument("delta", "Expr", "The source lane id offset to be added.")
+    .add_argument("width", "Expr", "The warp thread width, must be a power of 2.")
     .set_attr<TGlobalSymbol>("TGlobalSymbol", "__shfl_up_sync")
     .set_attr<TCallEffectKind>("TCallEffectKind", Integer(CallEffectKind::kOpaque))
     .set_attr<bool>("cuda.need_warp_shuffle", true);
 
 TVM_REGISTER_OP("tir.cuda.__shfl_down_sync")
     .set_num_inputs(4)
+    .add_argument("mask", "Expr", "The thread mask.")
+    .add_argument("var", "Expr", "The variable to sync.")
+    .add_argument("delta", "Expr", "The source lane id offset to be subtracted.")
+    .add_argument("width", "Expr", "The warp thread width, must be a power of 2.")
     .set_attr<TGlobalSymbol>("TGlobalSymbol", "__shfl_down_sync")
     .set_attr<TCallEffectKind>("TCallEffectKind", Integer(CallEffectKind::kOpaque))
     .set_attr<bool>("cuda.need_warp_shuffle", true);
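For reference on the semantics being documented: per the CUDA programming guide, __shfl_sync(mask, var, lane, width) reads var from an absolute source lane, __shfl_up_sync computes its source as the caller's lane id minus delta, __shfl_down_sync as the caller's lane id plus delta, and width must be a power of two no larger than warpSize.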
