From 53c06410bd53b845ff3ece2a95b7add44cdf1f52 Mon Sep 17 00:00:00 2001
From: Dongming Yang
Date: Tue, 22 Dec 2020 04:49:52 +0800
Subject: [PATCH] Fix a few OpNode argument field descriptions when registered
 (#7140)

---
 src/relay/op/annotation/annotation.cc |  6 ++++++
 src/relay/op/device_copy.cc           |  1 +
 src/relay/op/image/grid_sample.cc     |  3 ++-
 src/relay/op/memory/memory.cc         |  2 +-
 src/relay/op/nn/pooling.cc            |  2 ++
 src/relay/op/tensor/transform.cc      |  7 +++++++
 src/relay/op/vm/vm.cc                 |  4 +++-
 src/target/source/intrin_rule_cuda.cc | 12 ++++++++++++
 8 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/src/relay/op/annotation/annotation.cc b/src/relay/op/annotation/annotation.cc
index 74c8c69ea471..60a2e95cdcf7 100644
--- a/src/relay/op/annotation/annotation.cc
+++ b/src/relay/op/annotation/annotation.cc
@@ -50,6 +50,7 @@ TVM_REGISTER_GLOBAL("relay.op.annotation._make.on_device")
 RELAY_REGISTER_OP("on_device")
     .describe(R"code(Annotate an expression with device type)code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
@@ -117,6 +118,7 @@ RELAY_REGISTER_OP("annotation.bitpack_start")
 Mark the start of bitpacking.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
@@ -133,6 +135,7 @@ RELAY_REGISTER_OP("annotation.bitpack_end")
 Mark the end of bitpacking.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
@@ -155,6 +158,7 @@ Mark a checkpoint for checkpointing memory optimization.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
     .set_support_level(10)
+    .add_argument("data", "Tensor", "The input data.")
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
     .set_attr<TOpIsStateful>("TOpIsStateful", false)
@@ -176,6 +180,7 @@ RELAY_REGISTER_OP("annotation.compiler_begin")
 Beginning of a region that is handled by a given compiler.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
@@ -200,6 +205,7 @@ RELAY_REGISTER_OP("annotation.compiler_end")
 End of a region that is handled by a given compiler.
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(1)
+    .add_argument("data", "Tensor", "The input data.")
     .set_support_level(10)
     .add_type_rel("Identity", IdentityRel)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
diff --git a/src/relay/op/device_copy.cc b/src/relay/op/device_copy.cc
index b26dc879be0a..997eec5a333f 100644
--- a/src/relay/op/device_copy.cc
+++ b/src/relay/op/device_copy.cc
@@ -57,6 +57,7 @@ Copy data from one tensor to another. The source and destination might be on
 different devices.
)code" TVM_ADD_FILELINE) .set_num_inputs(1) + .add_argument("data", "Tensor", "The input data.") .set_support_level(10) .add_type_rel("Identity", IdentityRel) .set_attr("TOpPattern", kOpaque) diff --git a/src/relay/op/image/grid_sample.cc b/src/relay/op/image/grid_sample.cc index d5fa68aed82a..38d4c8103ff2 100644 --- a/src/relay/op/image/grid_sample.cc +++ b/src/relay/op/image/grid_sample.cc @@ -149,7 +149,7 @@ grid_sample often cooperates with affine_grid which generates sampling grids for (batch_size, channels, in_height, in_width) for NCHW (batch_size, in_height, in_width, channels) for NHWC -- **grid**: out is 4D array of shape [batch, 2, out_height, out_width], where each vector +- **grid**: grid is 4D array of shape [batch, 2, out_height, out_width], where each vector :math:`out[b, :, h, w]` represents the coordinate :math:`(x, y)` - **out**: out is 4D array of shape @@ -160,6 +160,7 @@ grid_sample often cooperates with affine_grid which generates sampling grids for .set_num_inputs(2) .set_attrs_type() .add_argument("data", "Tensor", "The input tensor.") + .add_argument("grid", "Tensor", "The grid tensor.") .set_support_level(5) .add_type_rel("GridSample", GridSampleRel) .set_attr("TOpPattern", kInjective); diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc index dc5a1ebd3c73..c0edf467815a 100644 --- a/src/relay/op/memory/memory.cc +++ b/src/relay/op/memory/memory.cc @@ -206,7 +206,7 @@ bool KillRel(const Array& types, int num_inputs, const Attrs& attrs, RELAY_REGISTER_OP("memory.kill") .describe(R"code(Mark a tensor for release to the allocator.)code" TVM_ADD_FILELINE) - .set_num_inputs(3) + .set_num_inputs(1) .add_argument("to_free", "Tensor", "The tensor to free.") .add_type_rel("Kill", KillRel) .set_support_level(10) diff --git a/src/relay/op/nn/pooling.cc b/src/relay/op/nn/pooling.cc index 4fb1745d65aa..0f38979a7ca1 100644 --- a/src/relay/op/nn/pooling.cc +++ b/src/relay/op/nn/pooling.cc @@ -765,6 +765,7 @@ RELAY_REGISTER_OP("nn.max_pool2d_grad") .set_attrs_type() .set_num_inputs(2) .add_argument("data", "Tensor", "The input tensor.") + .add_argument("grad", "Tensor", "The grad tensor.") .set_support_level(2) .add_type_rel("MaxPool2DGrad", Pool2DGradRel) .set_attr("FTVMCompute", Pool2DGradCompute); @@ -812,6 +813,7 @@ RELAY_REGISTER_OP("nn.avg_pool2d_grad") .set_attrs_type() .set_num_inputs(2) .add_argument("data", "Tensor", "The input tensor.") + .add_argument("grad", "Tensor", "The grad tensor.") .set_support_level(2) .add_type_rel("MaxPool2DGrad", Pool2DGradRel) .set_attr("FTVMCompute", Pool2DGradCompute); diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 640943eac805..6819ea93f249 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -1041,6 +1041,7 @@ Given data with shape (Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1}) and indices with sh )code" TVM_ADD_FILELINE) .set_num_inputs(2) .add_argument("data", "Tensor", "The input tensor.") + .add_argument("indices", "Tensor", "The indices tensor.") .set_support_level(3) .add_type_rel("ScatterND", ScatterNDRel) .set_attr("TOpPattern", kInjective); @@ -1388,6 +1389,9 @@ RELAY_REGISTER_OP("arange") )code" TVM_ADD_FILELINE) .set_attrs_type() .set_num_inputs(3) + .add_argument("start", "Expr", "Start of interval. The interval includes this value.") + .add_argument("end", "Expr", "Stop of interval. 
+    .add_argument("step", "Expr", "Spacing between values.")
     .set_support_level(3)
     .add_type_rel("Arange", ArangeRel)
     .set_attr<FTVMCompute>("FTVMCompute", ArangeCompute)
@@ -3030,6 +3034,7 @@ output shape will simply be (Y_0, ..., Y_{K-1}).
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(2)
     .add_argument("data", "Tensor", "The input tensor.")
+    .add_argument("indices", "Tensor", "The indices of values to gather.")
     .set_support_level(3)
     .add_type_rel("GatherND", GatherNDRel)
     .set_attr<FTVMCompute>("FTVMCompute", GatherNDCompute)
@@ -3260,6 +3265,8 @@ Example::
 
   - unravel_index([22, 41, 37], (7, 6)) = [[3, 6, 6], [4, 5, 1]]
 
 )code" TVM_ADD_FILELINE)
     .set_num_inputs(2)
+    .add_argument("data", "Tensor", "The input tensor.")
+    .add_argument("shape", "Tensor", "The shape tensor.")
     .set_support_level(3)
     .add_type_rel("UnRavelIndexRel", UnRavelIndexRel)
     .set_attr<FTVMCompute>("FTVMCompute", UnRavelIndexCompute)
diff --git a/src/relay/op/vm/vm.cc b/src/relay/op/vm/vm.cc
index 8c1c9f3e9c59..0fb79206d71d 100644
--- a/src/relay/op/vm/vm.cc
+++ b/src/relay/op/vm/vm.cc
@@ -121,7 +121,9 @@ bool ShapeFuncRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 RELAY_REGISTER_OP("vm.shape_func")
     .describe(R"code(Get the shape of a tensor.)code" TVM_ADD_FILELINE)
     .set_num_inputs(3)
-    .add_argument("tensor", "Tensor", "The tensor to retrieve the shape for.")
+    .add_argument("func", "Function", "The operation to call.")
+    .add_argument("ins", "Tuple", "The input tensors.")
+    .add_argument("outs", "Tuple", "The output tensors.")
     .add_type_rel("ShapeFuncRel", ShapeFuncRel)
     .set_support_level(10)
     .set_attr<TOpPattern>("TOpPattern", kOpaque)
diff --git a/src/target/source/intrin_rule_cuda.cc b/src/target/source/intrin_rule_cuda.cc
index 0a68736bcd05..5c562f7b1643 100644
--- a/src/target/source/intrin_rule_cuda.cc
+++ b/src/target/source/intrin_rule_cuda.cc
@@ -186,18 +186,30 @@ TVM_REGISTER_GLOBAL("tvm.intrin.rule.cuda.fmod").set_body(DispatchPureExtern<CUDAMath>);
 
 TVM_REGISTER_OP("tir.cuda.__shfl_sync")
     .set_num_inputs(4)
+    .add_argument("mask", "Expr", "The thread mask.")
+    .add_argument("var", "Expr", "The variable to sync.")
+    .add_argument("lane", "Expr", "The source lane id.")
+    .add_argument("width", "Expr", "The warp thread width, must be a power of 2.")
     .set_attr<TGlobalSymbol>("TGlobalSymbol", "__shfl_sync")
     .set_attr<TCallEffectKind>("TCallEffectKind", Integer(CallEffectKind::kOpaque))
     .set_attr<bool>("cuda.need_warp_shuffle", true);
 
 TVM_REGISTER_OP("tir.cuda.__shfl_up_sync")
     .set_num_inputs(4)
+    .add_argument("mask", "Expr", "The thread mask.")
+    .add_argument("var", "Expr", "The variable to sync.")
+    .add_argument("delta", "Expr", "The source lane id offset to be added.")
+    .add_argument("width", "Expr", "The warp thread width, must be a power of 2.")
     .set_attr<TGlobalSymbol>("TGlobalSymbol", "__shfl_up_sync")
     .set_attr<TCallEffectKind>("TCallEffectKind", Integer(CallEffectKind::kOpaque))
     .set_attr<bool>("cuda.need_warp_shuffle", true);
 
 TVM_REGISTER_OP("tir.cuda.__shfl_down_sync")
     .set_num_inputs(4)
+    .add_argument("mask", "Expr", "The thread mask.")
+    .add_argument("var", "Expr", "The variable to sync.")
+    .add_argument("delta", "Expr", "The source lane id offset to be subtracted.")
+    .add_argument("width", "Expr", "The warp thread width, must be a power of 2.")
     .set_attr<TGlobalSymbol>("TGlobalSymbol", "__shfl_down_sync")
     .set_attr<TCallEffectKind>("TCallEffectKind", Integer(CallEffectKind::kOpaque))
     .set_attr<bool>("cuda.need_warp_shuffle", true);
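
---
The invariant every hunk above enforces: an op registered with set_num_inputs(N)
should also chain N add_argument(name, type, description) calls, so the OpNode's
argument metadata matches its declared arity and each input shows up in the
generated operator docs. A minimal sketch of a compliant registration follows,
assuming a hypothetical op name "example.my_op"; every other identifier
(RELAY_REGISTER_OP, IdentityRel, TVM_ADD_FILELINE, kOpaque) appears in the
patch above.

// Minimal sketch (hypothetical op "example.my_op"): one declared input,
// therefore exactly one add_argument() describing it, mirroring the
// "on_device" registration fixed in this patch.
RELAY_REGISTER_OP("example.my_op")
    .describe(R"code(Example op with one described argument per declared input.)code" TVM_ADD_FILELINE)
    .set_num_inputs(1)
    .add_argument("data", "Tensor", "The input data.")
    .set_support_level(10)
    .add_type_rel("Identity", IdentityRel)
    .set_attr<TOpPattern>("TOpPattern", kOpaque);

Mismatches in the other direction are corrected the same way: memory.kill
declared 3 inputs while taking a single tensor, and vm.shape_func described one
"tensor" argument while declaring 3 inputs, so their counts and argument lists
are brought back into agreement above.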