Commit ff6507d

[PTen] Add alias kernel name (#37881)
* add alias kernel name

* modify code as suggestions
YuanRisheng authored Dec 8, 2021
1 parent 1716324 commit ff6507d
Showing 18 changed files with 118 additions and 71 deletions.
3 changes: 2 additions & 1 deletion paddle/fluid/framework/operator.cc
@@ -1760,7 +1760,8 @@ OpKernelType OperatorWithKernel::GetKernelTypeForVar(
 
 KernelSignature OperatorWithKernel::GetExpectedPtenKernelArgs(
     const ExecutionContext& ctx) const {
-  return KernelSignatureMap::Instance().Get(Type());
+  return KernelSignatureMap::Instance().Get(
+      pten::TransToPtenKernelName(Type()));
 }
 
 void OperatorWithKernel::BuildPtenKernelContext(
8 changes: 4 additions & 4 deletions paddle/fluid/framework/pten_utils.cc
@@ -101,10 +101,10 @@ KernelSignatureMap& KernelSignatureMap::Instance() {
     if (pten::KernelFactory::Instance().HasCompatiblePtenKernel(op_type)) {
       KernelArgsNameMakerByOpProto maker(op_proto);
       VLOG(10) << "Register kernel signature for " << op_type;
-      auto success =
-          kernel_signature_map_->map_
-              .emplace(op_type, std::move(maker.GetKernelSignature()))
-              .second;
+      auto success = kernel_signature_map_->map_
+                         .emplace(pten::TransToPtenKernelName(op_type),
+                                  std::move(maker.GetKernelSignature()))
+                         .second;
       PADDLE_ENFORCE_EQ(
           success, true,
           platform::errors::PermissionDenied(
15 changes: 7 additions & 8 deletions paddle/fluid/operators/elementwise/elementwise_op.h
@@ -144,26 +144,25 @@ class ElementwiseOp : public framework::OperatorWithKernel {
       const framework::ExecutionContext &ctx) const override {
     if (Type() == "elementwise_add") {
       if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
-        return framework::KernelSignature("elementwise_add", {"X", "Y"},
-                                          {"axis"}, {"Out"});
+        return framework::KernelSignature("add", {"X", "Y"}, {"axis"}, {"Out"});
       }
     }
     if (Type() == "elementwise_sub") {
       if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
-        return framework::KernelSignature("elementwise_sub", {"X", "Y"},
-                                          {"axis"}, {"Out"});
+        return framework::KernelSignature("subtract", {"X", "Y"}, {"axis"},
+                                          {"Out"});
       }
     }
     if (Type() == "elementwise_div") {
       if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
-        return framework::KernelSignature("elementwise_div", {"X", "Y"},
-                                          {"axis"}, {"Out"});
+        return framework::KernelSignature("divide", {"X", "Y"}, {"axis"},
+                                          {"Out"});
       }
     }
     if (Type() == "elementwise_mul") {
       if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
-        return framework::KernelSignature("elementwise_mul", {"X", "Y"},
-                                          {"axis"}, {"Out"});
+        return framework::KernelSignature("multiply", {"X", "Y"}, {"axis"},
+                                          {"Out"});
       }
     }
     return framework::KernelSignature("None", {"X"}, {}, {"Out"});
2 changes: 1 addition & 1 deletion paddle/fluid/operators/fill_any_like_op.cc
@@ -50,7 +50,7 @@ class FillAnyLikeOp : public framework::OperatorWithKernel {
 
   framework::KernelSignature GetExpectedPtenKernelArgs(
       const framework::ExecutionContext &ctx) const override {
-    return framework::KernelSignature("fill_any_like", {}, {"value"}, {"Out"});
+    return framework::KernelSignature("full_like", {}, {"value"}, {"Out"});
   }
 };
 
3 changes: 1 addition & 2 deletions paddle/fluid/operators/fill_constant_op.cc
@@ -118,8 +118,7 @@ class FillConstantOp : public framework::OperatorWithKernel {
       value = str_value.empty() ? "value" : "str_value";
     }
     if (!ctx.OutputVar("Out")->IsType<framework::SelectedRows>()) {
-      return framework::KernelSignature("fill_constant", {}, {shape, value},
-                                        {"Out"});
+      return framework::KernelSignature("full", {}, {shape, value}, {"Out"});
     }
     return framework::KernelSignature("fill_constant.unregistered", {}, {}, {});
   }
7 changes: 3 additions & 4 deletions paddle/fluid/operators/flatten_op.cc
@@ -337,11 +337,10 @@ class FlattenContiguousRangeOp : public framework::OperatorWithKernel {
   framework::KernelSignature GetExpectedPtenKernelArgs(
       const framework::ExecutionContext &ctx) const override {
     if (ctx.HasOutput("XShape")) {
-      return framework::KernelSignature("flatten_contiguous_range.mid", {"X"},
-                                        {"start_axis", "stop_axis"},
-                                        {"Out", "XShape"});
+      return framework::KernelSignature(
+          "flatten.mid", {"X"}, {"start_axis", "stop_axis"}, {"Out", "XShape"});
     } else {
-      return framework::KernelSignature("flatten_contiguous_range", {"X"},
+      return framework::KernelSignature("flatten", {"X"},
                                         {"start_axis", "stop_axis"}, {"Out"});
     }
   }
8 changes: 4 additions & 4 deletions paddle/fluid/operators/reshape_op.cc
@@ -555,13 +555,13 @@ class Reshape2Op : public ReshapeOp {
       const framework::ExecutionContext &ctx) const override {
     auto multi_inputs = ctx.MultiInput<framework::Tensor>("ShapeTensor");
     if (multi_inputs.size() > 0) {
-      return framework::KernelSignature("reshape2.mulhost",
-                                        {"X", "ShapeTensor"}, {}, {"Out"});
+      return framework::KernelSignature("reshape.mulhost", {"X", "ShapeTensor"},
+                                        {}, {"Out"});
     } else if (ctx.HasInput("Shape")) {
-      return framework::KernelSignature("reshape2.host", {"X", "Shape"}, {},
+      return framework::KernelSignature("reshape.host", {"X", "Shape"}, {},
                                         {"Out"});
     } else {
-      return framework::KernelSignature("reshape2", {"X"}, {"shape"}, {"Out"});
+      return framework::KernelSignature("reshape", {"X"}, {"shape"}, {"Out"});
     }
   }
 };
10 changes: 9 additions & 1 deletion paddle/pten/core/convert_utils.cc
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/pten/core/convert_utils.h"
-
+#include "paddle/pten/core/kernel_alias_name.h"
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/fluid/platform/device/gpu/gpu_info.h"
 
@@ -270,4 +270,12 @@ std::string DataType2String(DataType dtype) {
   }
 }
 
+const std::string& TransToPtenKernelName(const std::string& fluid_op_name) {
+  if (kernel_alias_name_map.find(fluid_op_name) !=
+      kernel_alias_name_map.end()) {
+    return kernel_alias_name_map.at(fluid_op_name);
+  }
+  return fluid_op_name;
+}
+
 }  // namespace pten
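
Aside: the new TransToPtenKernelName above is the core of this change. It is one map lookup with a pass-through default, applied to a fluid op name before kernel signatures are registered and queried. A minimal standalone sketch of the same lookup behavior (the two-entry map and the main() driver are illustrative, not Paddle code):

#include <iostream>
#include <string>
#include <unordered_map>

// Illustrative stand-in for pten::kernel_alias_name_map.
const std::unordered_map<std::string, std::string> alias_map = {
    {"elementwise_add", "add"}, {"reshape2", "reshape"}};

// Same shape as pten::TransToPtenKernelName: return the alias if one
// exists, otherwise fall through to the original fluid op name.
const std::string& TransToPtenKernelName(const std::string& fluid_op_name) {
  auto it = alias_map.find(fluid_op_name);
  return it != alias_map.end() ? it->second : fluid_op_name;
}

int main() {
  std::cout << TransToPtenKernelName("elementwise_add") << "\n";  // prints "add"
  std::cout << TransToPtenKernelName("scale") << "\n";  // prints "scale" (no alias)
  return 0;
}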
2 changes: 2 additions & 0 deletions paddle/pten/core/convert_utils.h
@@ -32,6 +32,8 @@ namespace pten {
 using DataType = paddle::experimental::DataType;
 using DataLayout = paddle::experimental::DataLayout;
 
+const std::string& TransToPtenKernelName(const std::string& fluid_op_name);
+
 Backend TransToPtenBackend(const paddle::platform::Place& place);
 DataType TransToPtenDataType(
     const paddle::framework::proto::VarType::Type& dtype);
41 changes: 41 additions & 0 deletions paddle/pten/core/kernel_alias_name.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+// TODO(yuanrisheng): this file may need to be removed
+#pragma once
+
+namespace pten {
+
+// The key is the kernel_name in fluid; the value is the kernel_name in pten.
+// Entries are sorted alphabetically by key.
+const std::unordered_map<std::string, std::string> kernel_alias_name_map = {
+    {"elementwise_add", "add"},
+    {"elementwise_div", "divide"},
+    {"elementwise_mul", "multiply"},
+    {"elementwise_sub", "subtract"},
+    {"fill_any_like", "full_like"},
+    {"fill_constant", "full"},
+    {"flatten_contiguous_range", "flatten"},
+    // {"matmul_v2", "matmul"},
+    {"reduce_mean", "mean"},
+    {"reduce_sum", "sum"},
+    {"reshape2", "reshape"},
+    // The fluid kernels "flatten"/"matmul"/"mean"/"reshape"/"sum" are deprecated.
+    {"flatten", "deprecated"},
+    // {"matmul", "deprecated"},
+    {"mean", "deprecated"},
+    {"reshape", "deprecated"},
+    {"sum", "deprecated"}};
+
+}  // namespace pten
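
Note the "deprecated" sentinel above: fluid kernels whose names collide with the new pten names are mapped to a string that no pten kernel registers under, so the alias lookup can never route those legacy kernels onto the pten path. The expected translations under this map, written as a hedged sketch against the TransToPtenKernelName declared in convert_utils.h (not an actual test from this commit):

#include <cassert>

#include "paddle/pten/core/convert_utils.h"

void CheckAliasTranslation() {
  assert(pten::TransToPtenKernelName("reduce_mean") == "mean");  // renamed kernel
  assert(pten::TransToPtenKernelName("mean") == "deprecated");   // legacy fluid kernel blocked
  assert(pten::TransToPtenKernelName("scale") == "scale");       // no entry: name passes through
}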
3 changes: 2 additions & 1 deletion paddle/pten/core/kernel_factory.h
@@ -23,6 +23,7 @@
 #include "paddle/pten/common/backend.h"
 #include "paddle/pten/common/data_type.h"
 #include "paddle/pten/common/layout.h"
+#include "paddle/pten/core/convert_utils.h"
 #include "paddle/pten/core/kernel_def.h"
 
 // See Note [ Why still include the fluid headers? ]
@@ -269,7 +270,7 @@ class KernelFactory {
   }
 
   bool HasCompatiblePtenKernel(const std::string& op_type) const {
-    return compatible_op_types_.count(op_type) > 0;
+    return compatible_op_types_.count(TransToPtenKernelName(op_type)) > 0;
   }
 
   const Kernel& SelectKernelOrThrowError(const KernelName& kernel_name,
4 changes: 2 additions & 2 deletions paddle/pten/kernels/cpu/creation.cc
@@ -63,7 +63,7 @@ void FillConstant(const CPUContext& dev_ctx,
 
 PT_REGISTER_MODULE(CreationCPU);
 
-PT_REGISTER_KERNEL("fill_any_like",
+PT_REGISTER_KERNEL("full_like",
                    CPU,
                    ANY,
                    pten::FillAnyLike,
@@ -74,7 +74,7 @@ PT_REGISTER_KERNEL("fill_any_like",
                    bool,
                    paddle::platform::float16) {}
 
-PT_REGISTER_KERNEL("fill_constant",
+PT_REGISTER_KERNEL("full",
                    CPU,
                    ANY,
                    pten::FillConstant,
19 changes: 8 additions & 11 deletions paddle/pten/kernels/cpu/manipulation.cc
@@ -135,7 +135,7 @@ PT_REGISTER_MODULE(ManipulationCPU);
 
 // TODO(yuanrisheng): "flatten_contiguous_range" is compatible with old kernel
 // architecture, kernel_name should be "flatten".
-PT_REGISTER_KERNEL("flatten_contiguous_range",
+PT_REGISTER_KERNEL("flatten",
                    CPU,
                    ANY,
                    pten::Flatten,
@@ -146,7 +146,7 @@ PT_REGISTER_KERNEL("flatten_contiguous_range",
                    int,
                    int64_t) {}
 
-PT_REGISTER_KERNEL("flatten_contiguous_range.mid",
+PT_REGISTER_KERNEL("flatten.mid",
                    CPU,
                    ANY,
                    pten::FlattenWithXShape,
@@ -176,40 +176,37 @@ PT_REGISTER_KERNEL("cast",
 
 // TODO(yuanrisheng): "reshape2" is compatible with old kernel
 // architecture, kernel_name should be "reshape".
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromVectorVal) {}
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mid",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mid",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromVectorValWithXShape) {}
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host",
-                                CPU,
-                                ANY,
-                                pten::ReshapeFromDT) {
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host", CPU, ANY, pten::ReshapeFromDT) {
   kernel->InputAt(1).SetBackend(pten::Backend::CPU);
   kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
 }
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.host.mid",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.host.mid",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromDTWithXShape) {
   kernel->InputAt(1).SetBackend(pten::Backend::CPU);
   kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
 }
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromVectorDT) {
   kernel->InputAt(1).SetBackend(pten::Backend::CPU);
   kernel->InputAt(1).SetDataType(paddle::experimental::DataType::INT32);
 }
 
-PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape2.mulhost.mid",
+PT_REGISTER_KERNEL_WITH_NO_TYPE("reshape.mulhost.mid",
                                 CPU,
                                 ANY,
                                 pten::ReshapeFromVectorDTWithXShape) {
12 changes: 6 additions & 6 deletions paddle/pten/kernels/cpu/math.cc
@@ -116,7 +116,7 @@ using complex128 = ::paddle::platform::complex<double>;
 // using bfloat16 = ::paddle::platform::bfloat16;
 
 PT_REGISTER_KERNEL("sign", CPU, ANY, pten::Sign, float, double) {}
-PT_REGISTER_KERNEL("reduce_mean", CPU, ANY, pten::Mean, float, double, bool) {}
+PT_REGISTER_KERNEL("mean", CPU, ANY, pten::Mean, float, double, bool) {}
 PT_REGISTER_KERNEL("scale",
                    CPU,
                    ANY,
@@ -130,7 +130,7 @@ PT_REGISTER_KERNEL("scale",
                    int,
                    int64_t) {}
 
-PT_REGISTER_KERNEL("elementwise_add",
+PT_REGISTER_KERNEL("add",
                    CPU,
                    ANY,
                    pten::ElementwiseAdd,
@@ -140,7 +140,7 @@ PT_REGISTER_KERNEL("elementwise_add",
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("elementwise_sub",
+PT_REGISTER_KERNEL("subtract",
                    CPU,
                    ANY,
                    pten::ElementwiseSub,
@@ -150,7 +150,7 @@ PT_REGISTER_KERNEL("elementwise_sub",
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("elementwise_div",
+PT_REGISTER_KERNEL("divide",
                    CPU,
                    ANY,
                    pten::ElementwiseDiv,
@@ -160,7 +160,7 @@ PT_REGISTER_KERNEL("elementwise_div",
                    int64_t,
                    complex64,
                    complex128) {}
-PT_REGISTER_KERNEL("elementwise_mul",
+PT_REGISTER_KERNEL("multiply",
                    CPU,
                    ANY,
                    pten::ElementwiseMul,
@@ -172,7 +172,7 @@ PT_REGISTER_KERNEL("elementwise_mul",
                    complex64,
                    complex128) {}
 
-PT_REGISTER_KERNEL("reduce_sum",
+PT_REGISTER_KERNEL("sum",
                    CPU,
                    ANY,
                    pten::Sum,
4 changes: 2 additions & 2 deletions paddle/pten/kernels/cuda/creation.cu
@@ -64,7 +64,7 @@ void FillConstant(const CUDAContext& dev_ctx,
 
 PT_REGISTER_MODULE(CreationCUDA);
 
-PT_REGISTER_KERNEL("fill_any_like",
+PT_REGISTER_KERNEL("full_like",
                    CUDA,
                    ANY,
                    pten::FillAnyLike,
@@ -75,7 +75,7 @@ PT_REGISTER_KERNEL("fill_any_like",
                    bool,
                    paddle::platform::float16) {}
 
-PT_REGISTER_KERNEL("fill_constant",
+PT_REGISTER_KERNEL("full",
                    CUDA,
                    ANY,
                    pten::FillConstant,
(Diff truncated: remaining changed files not shown.)