[AutoParallel] Custom op support auto parallel (#58553)
* custom op support auto parallel
wanghuancoder authored Nov 7, 2023
1 parent 7c70a8c commit fe862dd
Showing 13 changed files with 1,137 additions and 61 deletions.
5 changes: 5 additions & 0 deletions paddle/fluid/eager/custom_operator/CMakeLists.txt
@@ -1,4 +1,9 @@
cc_library(
  custom_operator_node
  SRCS custom_operator_node.cc
  DEPS phi grad_node_info custom_operator utils custom_operator_utils)

cc_library(
  custom_operator_utils
  SRCS custom_operator_utils.cc
  DEPS phi grad_node_info custom_operator utils)
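
The new custom_operator_utils target is the home of the shared run_custom_op_impl entry point that both grad nodes in the next file switch to, and custom_operator_node now links against it. A minimal interface sketch, assuming the helper lives in the egr namespace alongside the nodes that call it unqualified; only the call shape (an OpMetaInfo, two flags, and the kernel context) is visible in this diff, so the parameter names are illustrative:

// custom_operator_utils.h -- sketch, not the verbatim Paddle header
#pragma once

#include "paddle/phi/api/ext/op_meta_info.h"

namespace egr {

// Centralizes the custom-op kernel launch: the inplace-map bookkeeping the
// call sites used to do inline, plus the auto-parallel (dist tensor)
// handling this commit introduces.
void run_custom_op_impl(const paddle::OpMetaInfo& op_info,
                        bool is_forward,
                        bool is_double_grad,
                        paddle::CustomOpKernelContext& ctx);

}  // namespace egr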
31 changes: 16 additions & 15 deletions paddle/fluid/eager/custom_operator/custom_operator_node.cc
@@ -14,6 +14,7 @@

#include "paddle/fluid/eager/custom_operator/custom_operator_node.h"

#include "paddle/fluid/eager/custom_operator/custom_operator_utils.h"
#include "paddle/fluid/framework/custom_operator.h"
#include "paddle/fluid/framework/custom_operator_utils.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
@@ -172,8 +173,6 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
paddle::OpMetaInfoHelper::GetInputs(vec_map[1]);
const auto& grad_outputs_names =
paddle::OpMetaInfoHelper::GetOutputs(vec_map[1]);
const auto& grad_inplace_map =
paddle::OpMetaInfoHelper::GetInplaceMap(vec_map[1]);
const auto& map =
egr::Controller::Instance().GetCustomEdgesSlotMap().at(op_type_);

@@ -251,11 +250,12 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
}
VLOG(7) << "Run Kernel of Grad Custom Op: " << op_type_ << "_grad";

// handle inplace map
ctx.UpdatePlainOutputs(
grad_inputs_name, grad_outputs_names, grad_inplace_map);
(*paddle::OpMetaInfoHelper::GetKernelFn(vec_map[1]))(&ctx);
ctx.AssignInplaceOutputs();
run_custom_op_impl(vec_map[1], false, false, ctx);

for (size_t i = 0; i < ctx.OutputRange().size(); ++i) {
auto output_pair = ctx.OutputRangeAt(i);
outs[i] = ctx.OutputsBetween(output_pair.first, output_pair.second);
}

// handle optional None output when construct backward graph
for (size_t i = 0; i < ctx.OutputRange().size(); i++) {
@@ -264,7 +264,9 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
ctx.MutableOutputAt(ctx.OutputRangeAt(i).first);
if (!out_tensor->initialized()) {
PADDLE_ENFORCE(
paddle::framework::detail::IsOptionalVar(grad_outputs_names.at(i)),
paddle::framework::detail::IsOptionalVar(
grad_outputs_names.at(i)) ||
out_tensor->is_dist_tensor(),
phi::errors::InvalidArgument(
"Custom grad operator's %d-th output is not initialized. "
"Please check your implementation again. If you are "
@@ -386,8 +388,6 @@ RunCustomOpDoubleGradNode::operator()(
paddle::OpMetaInfoHelper::GetInputs(vec_map[2]);
const auto& grad_outputs_names =
paddle::OpMetaInfoHelper::GetOutputs(vec_map[2]);
const auto& grad_inplace_map =
paddle::OpMetaInfoHelper::GetInplaceMap(vec_map[2]);
const auto& map =
egr::Controller::Instance().GetCustomEdgesSlotMap().at(op_type_);

@@ -451,11 +451,12 @@ RunCustomOpDoubleGradNode::operator()(
}
VLOG(7) << "Run Kernel of Grad Custom Op: " << op_type_ << "_grad_grad";

// handle inplace map
ctx.UpdatePlainOutputs(
grad_inputs_name, grad_outputs_names, grad_inplace_map);
(*paddle::OpMetaInfoHelper::GetKernelFn(vec_map[2]))(&ctx);
ctx.AssignInplaceOutputs();
run_custom_op_impl(vec_map[2], false, true, ctx);

for (size_t i = 0; i < ctx.OutputRange().size(); ++i) {
auto output_pair = ctx.OutputRangeAt(i);
outs[i] = ctx.OutputsBetween(output_pair.first, output_pair.second);
}

return outs;
}
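
In both RunCustomOpNode and RunCustomOpDoubleGradNode, the inline three-step launch (UpdatePlainOutputs with the inplace map, the kernel call fetched via GetKernelFn, then AssignInplaceOutputs) is replaced by one run_custom_op_impl call, which is why the grad_inplace_map lookups disappear from both call sites. A minimal sketch of what the helper is assumed to do, reconstructed from the deleted lines; the auto-parallel step is hypothetical, inferred from the relaxed is_dist_tensor() enforce above:

// Sketch only; the real body lives in custom_operator_utils.cc.
void run_custom_op_impl(const paddle::OpMetaInfo& op_info,
                        bool is_forward,
                        bool is_double_grad,
                        paddle::CustomOpKernelContext& ctx) {
  const auto& inputs = paddle::OpMetaInfoHelper::GetInputs(op_info);
  const auto& outputs = paddle::OpMetaInfoHelper::GetOutputs(op_info);
  const auto& inplace_map = paddle::OpMetaInfoHelper::GetInplaceMap(op_info);

  // The same inplace bookkeeping the deleted call-site code performed.
  ctx.UpdatePlainOutputs(inputs, outputs, inplace_map);

  // Hypothetical auto-parallel step: when inputs are dist tensors, pick
  // output placements and run the kernel on the local dense tensors. On a
  // rank that holds no shard, an output's local tensor can legitimately stay
  // uninitialized, which is why the enforce above now also accepts
  // out_tensor->is_dist_tensor().

  (*paddle::OpMetaInfoHelper::GetKernelFn(op_info))(&ctx);
  ctx.AssignInplaceOutputs();
}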
(Diffs for the remaining 11 of the 13 changed files are not shown here.)
