Fixed auto codegen for intermediate tensors
jim19930609 committed Feb 22, 2022
1 parent 5333641 · commit 62ca145
Showing 3 changed files with 41 additions and 22 deletions.
paddle/fluid/eager/auto_code_generator/eager_generator.cc (22 additions, 19 deletions)
@@ -1016,8 +1016,6 @@ static std::string GenerateGradNodeCreationContent(
     const std::string& output_name = output.name();
     const std::string& output_autograd_name = "p_autograd_" + output_name;

-    // Skip Intermediate Tensor
-
     if (output.duplicable()) {
       const char* GET_MULTI_AUTOGRAD_META_TEMPLATE =
           " std::vector<egr::AutogradMeta*> %s = "
@@ -1147,20 +1145,20 @@ static std::string GenerateGradNodeCreationContent(
     const std::string& output_autograd_name = "p_autograd_" + output_name;
     size_t output_position = fwd_outputs_name_pos_map.at(output_name);

-    // Intermediate Tensor does not require SetHistory, nor RetainGrad
-
     if (output.duplicable()) {
       pass_stop_gradient_args += ", &" + output_autograd_name;
       const char* SET_OUT_RANK_TEMPLATE =
           " egr::EagerUtils::SetOutRankWithSlot(&%s, %d);\n";
       grad_node_creation_str += paddle::string::Sprintf(
           SET_OUT_RANK_TEMPLATE, output_autograd_name, output_position);

-      const char* SET_HISTORY_TEMPLATE =
-          " egr::EagerUtils::SetHistory(&%s, grad_node);\n";
-      grad_node_creation_str +=
-          paddle::string::Sprintf(SET_HISTORY_TEMPLATE, output_autograd_name);
-
+      // Intermediate Tensor does not require SetHistory
+      if (!output.intermediate()) {
+        const char* SET_HISTORY_TEMPLATE =
+            " egr::EagerUtils::SetHistory(&%s, grad_node);\n";
+        grad_node_creation_str +=
+            paddle::string::Sprintf(SET_HISTORY_TEMPLATE, output_autograd_name);
+      }
       const char* SET_GRAD_IN_META_TEMPLATE =
           " grad_node->SetGradInMeta(&%s, %d);\n";
       grad_node_creation_str += paddle::string::Sprintf(
@@ -1173,22 +1171,27 @@ static std::string GenerateGradNodeCreationContent(
       grad_node_creation_str += paddle::string::Sprintf(
           SET_OUT_RANK_TEMPLATE, output_autograd_name, output_position);

-      const char* SET_HISTORY_TEMPLATE =
-          " egr::EagerUtils::SetHistory(%s, grad_node);\n";
-      grad_node_creation_str +=
-          paddle::string::Sprintf(SET_HISTORY_TEMPLATE, output_autograd_name);
-
+      // Intermediate Tensor does not require SetHistory
+      if (!output.intermediate()) {
+        const char* SET_HISTORY_TEMPLATE =
+            " egr::EagerUtils::SetHistory(%s, grad_node);\n";
+        grad_node_creation_str +=
+            paddle::string::Sprintf(SET_HISTORY_TEMPLATE, output_autograd_name);
+      }
       const char* SET_GRAD_IN_META_TEMPLATE =
           " grad_node->SetGradInMeta(%s, %d);\n";
       grad_node_creation_str += paddle::string::Sprintf(
           SET_GRAD_IN_META_TEMPLATE, output_autograd_name, output_position);
     }

-    VLOG(6) << "Generated Call RetainGradForTensor";
-    const char* RETAIN_GRAD_TEMPLATE =
-        " egr::EagerUtils::CheckAndRetainGrad(%s);\n";
-    grad_node_creation_str +=
-        paddle::string::Sprintf(RETAIN_GRAD_TEMPLATE, output_name);
+    // Intermediate Tensor does not require CheckAndRetainGrad
+    if (!output.intermediate()) {
+      VLOG(6) << "Generated Call RetainGradForTensor";
+      const char* RETAIN_GRAD_TEMPLATE =
+          " egr::EagerUtils::CheckAndRetainGrad(%s);\n";
+      grad_node_creation_str +=
+          paddle::string::Sprintf(RETAIN_GRAD_TEMPLATE, output_name);
+    }
   }
   VLOG(6) << "Generated SetGradIn/OutMeta";

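To make the change concrete, here is a rough sketch of the snippet the generator might now append to grad_node_creation_str for a hypothetical duplicable forward output named "Out" sitting at slot 0. The output name and slot index are illustrative, inferred from the templates above rather than taken from the commit:

    // Non-intermediate output: full setup, including history and retain-grad.
    egr::EagerUtils::SetOutRankWithSlot(&p_autograd_Out, 0);
    egr::EagerUtils::SetHistory(&p_autograd_Out, grad_node);
    grad_node->SetGradInMeta(&p_autograd_Out, 0);
    egr::EagerUtils::CheckAndRetainGrad(Out);

    // Intermediate output: SetHistory and CheckAndRetainGrad are skipped, so
    // only the slot rank and the grad-in meta are recorded.
    egr::EagerUtils::SetOutRankWithSlot(&p_autograd_Out, 0);
    grad_node->SetGradInMeta(&p_autograd_Out, 0);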
paddle/fluid/eager/backward.cc (4 additions, 3 deletions)
@@ -227,10 +227,11 @@ void RunBackward(const std::vector<paddle::experimental::Tensor>& tensors,
               << " 's name is: " << grad_output_tensor.name();

       auto* next_node = next_node_shared.get();
-
       if (!node_input_buffers_dict.count(next_node)) {
-        node_input_buffers_dict[next_node] =
-            std::make_unique<GradTensorHolder>(next_node->InputMeta());
+        const auto& input_meta = next_node->InputMeta();
+        auto grad_tensor_holder =
+            std::make_unique<GradTensorHolder>(input_meta);
+        node_input_buffers_dict[next_node] = std::move(grad_tensor_holder);
       }
       VLOG(6) << "Sum grad inputs for edge slot: " << edge_rank.first
               << ", rank: " << edge_rank.second;
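The backward.cc change appears to be a readability refactor: the GradTensorHolder is now built in named steps and then moved into the buffer map. A minimal standalone sketch of the same pattern, using an illustrative Holder stand-in instead of the real Paddle types, might look like this:

    #include <map>
    #include <memory>
    #include <vector>

    // Illustrative stand-in for GradTensorHolder: a move-only, heap-owned resource.
    struct Holder {
      explicit Holder(const std::vector<int>& meta) : meta_(meta) {}
      std::vector<int> meta_;
    };

    int main() {
      std::map<int, std::unique_ptr<Holder>> buffers;
      const int node_key = 42;

      if (!buffers.count(node_key)) {
        // Same shape as the refactored code: bind the input meta, construct the
        // holder, then move the unique_ptr into the map (it cannot be copied).
        const std::vector<int> input_meta = {1, 2, 3};
        auto holder = std::make_unique<Holder>(input_meta);
        buffers[node_key] = std::move(holder);
      }
      return buffers.count(node_key) == 1 ? 0 : 1;  // 0 on success
    }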
paddle/fluid/eager/utils.cc (15 additions, 0 deletions)
@@ -122,12 +122,27 @@ paddle::experimental::Tensor* EagerUtils::mutable_grad(
 void EagerUtils::SetHistory(std::vector<AutogradMeta*>* autograd_metas,
                             const std::shared_ptr<GradNodeBase>& grad_node) {
   for (const auto& autograd_meta : *autograd_metas) {
+    if (dynamic_cast<GradNodeAccumulation*>(autograd_meta->GradNode())) {
+      PADDLE_THROW(paddle::platform::errors::Fatal(
+          "Error: Reseting GradNodeAccumulation for leaf tensor is not allowed."
+          "You're not expected to hit this error, likely an internal issue "
+          "with Paddle."
+          "Please contact Paddle developers for more details."));
+    }
     autograd_meta->SetGradNode(grad_node);
   }
 }

 void EagerUtils::SetHistory(AutogradMeta* autograd_meta,
                             const std::shared_ptr<GradNodeBase>& grad_node) {
+  if (dynamic_cast<GradNodeAccumulation*>(autograd_meta->GradNode())) {
+    PADDLE_THROW(paddle::platform::errors::Fatal(
+        "Error: Reseting GradNodeAccumulation for leaf tensor is not allowed."
+        "You're not expected to hit this error, likely an internal issue with "
+        "Paddle."
+        "Please contact Paddle developers for more details."));
+  }
+
   autograd_meta->SetGradNode(grad_node);
 }

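The new checks in utils.cc use a dynamic_cast on the tensor's currently attached grad node to detect a leaf tensor's GradNodeAccumulation and refuse to overwrite it. A minimal sketch of that guard pattern, with stand-in types rather than the real Paddle classes, is shown below:

    #include <memory>
    #include <stdexcept>

    // Illustrative stand-ins for the grad node hierarchy.
    struct GradNodeBase {
      virtual ~GradNodeBase() = default;
    };
    struct GradNodeAccumulation : GradNodeBase {};  // attached to leaf tensors
    struct SomeOpGradNode : GradNodeBase {};        // an ordinary op grad node

    // Mirrors the guard added to SetHistory: if the currently attached node is
    // an accumulation node, the tensor is a leaf and its history must not be reset.
    void SetHistorySketch(GradNodeBase* current,
                          const std::shared_ptr<GradNodeBase>& grad_node) {
      if (dynamic_cast<GradNodeAccumulation*>(current)) {
        throw std::runtime_error(
            "Resetting GradNodeAccumulation for a leaf tensor is not allowed.");
      }
      // ... otherwise the new grad node would be recorded here ...
      (void)grad_node;
    }

    int main() {
      GradNodeAccumulation leaf_node;
      std::shared_ptr<GradNodeBase> incoming = std::make_shared<SomeOpGradNode>();
      try {
        SetHistorySketch(&leaf_node, incoming);
      } catch (const std::runtime_error&) {
        return 0;  // the guard fired, as expected for a leaf tensor
      }
      return 1;
    }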
