Skip to content

Commit

Permalink
Added EagerUtils to Eager Dygraph
Browse files Browse the repository at this point in the history
  • Loading branch information
jim19930609 committed Nov 23, 2021
1 parent 4812eda commit c8e988f
Show file tree
Hide file tree
Showing 10 changed files with 365 additions and 1 deletion.
3 changes: 3 additions & 0 deletions paddle/fluid/eager/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
add_subdirectory(api)
add_subdirectory(tests)
# Core autograd bookkeeping libraries, built on the pten tensor library.
cc_library(grad_node_info SRCS grad_node_info.cc DEPS pten pten_api)
cc_library(autograd_meta SRCS autograd_meta.cc DEPS pten pten_api)

# EagerUtils: static helpers for autograd-meta access/creation (utils.cc).
# Depends on fluid framework targets (operator, op_registry, ...) because the
# utilities bridge eager tensors and the existing fluid runtime.
cc_library(utils SRCS utils.cc DEPS pten pten_api global_utils layer proto_desc operator op_registry variable_helper memcpy scale_op autograd_meta)
1 change: 1 addition & 0 deletions paddle/fluid/eager/api/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
add_subdirectory(utils)
1 change: 1 addition & 0 deletions paddle/fluid/eager/api/utils/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
cc_library(global_utils SRCS global_utils.cc DEPS enforce)
22 changes: 22 additions & 0 deletions paddle/fluid/eager/api/utils/global_utils.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "paddle/fluid/eager/api/utils/global_utils.h"

namespace egr {

// Definition of the process-wide singleton declared in global_utils.h.
// NOTE(review): heap-allocated and never freed -- presumably a deliberate
// leak to sidestep static-destruction-order issues at process exit; confirm.
Controller* Controller::controller_ = new Controller();

}  // namespace egr
62 changes: 62 additions & 0 deletions paddle/fluid/eager/api/utils/global_utils.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#pragma once

#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/platform/enforce.h"

namespace egr {

// Generates process-unique names of the form "<prefix><key>_<id>".
// Thread-safe: the id counter is atomic, so concurrent Generate() calls
// never hand out the same id.
class UniqueNameGenerator {
 public:
  // Sink parameter: moved into the member to avoid a redundant copy
  // (signature unchanged for callers).
  explicit UniqueNameGenerator(std::string prefix = "")
      : prefix_(std::move(prefix)) {}

  // Returns the next unique name for `key`; ids start at 0 and increase
  // monotonically per generator instance.
  std::string Generate(std::string key = "eager_tmp") {
    return prefix_ + key + "_" + std::to_string(id_++);
  }

 private:
  std::atomic<int> id_{0};  // atomically post-incremented in Generate()
  std::string prefix_;
};

// Global
// Process-wide singleton carrying eager-mode runtime state: the expected
// device placement, the AMP level, the grad flag, and a unique-name
// generator. The instance itself is defined in global_utils.cc.
class Controller {
 public:
  static Controller& Instance() { return *controller_; }
  // NOTE(review): expected_place_ starts out nullptr, so calling this before
  // SetExpectedPlace() dereferences null -- confirm every caller initializes
  // the place first, or add an enforce here.
  const paddle::platform::Place& GetExpectedPlace() const {
    return *expected_place_.get();
  }
  void SetExpectedPlace(const paddle::platform::Place& place) {
    expected_place_ = std::make_shared<paddle::platform::Place>(place);
  }
  void SetAMPLevel(int level) { amp_level_ = level; }
  int GetAMPLevel() const { return amp_level_; }
  // NOTE(review): has_grad_ has no setter in this header; HasGrad() always
  // returns true until one is added.
  bool HasGrad() const { return has_grad_; }
  // Forwards to the per-controller UniqueNameGenerator (see above).
  std::string GenerateUniqueName(std::string key = "eager_tmp") {
    return generator_->Generate(key);
  }

 private:
  Controller() = default;  // singleton: obtain via Instance()
  static Controller* controller_;  // defined in global_utils.cc
  std::shared_ptr<paddle::platform::Place> expected_place_ = nullptr;
  int amp_level_ = 0;
  bool has_grad_ = true;
  std::unique_ptr<UniqueNameGenerator> generator_{new UniqueNameGenerator()};
  DISABLE_COPY_AND_ASSIGN(Controller);
};

} // namespace egr
3 changes: 2 additions & 1 deletion paddle/fluid/eager/tests/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
# NOTE(review): the second set() fully overrides the first; only the extended
# dependency list (with pten_tensor and utils) takes effect.
set(eager_deps pten pten_api)
set(eager_deps pten pten_api pten_tensor utils)
add_subdirectory(data_structure_tests)
add_subdirectory(task_tests)
1 change: 1 addition & 0 deletions paddle/fluid/eager/tests/task_tests/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
cc_test(test_egr_ds_eager_utils SRCS eager_utils_test.cc DEPS ${eager_deps})
101 changes: 101 additions & 0 deletions paddle/fluid/eager/tests/task_tests/eager_utils_test.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sstream>

#include "gtest/gtest.h"

#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/fluid/eager/tests/data_structure_tests/grad_node_test.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/pten/api/lib/utils/allocator.h"

#include "paddle/pten/core/kernel_registry.h"

// TODO(jiabin): remove nolint here!!!
using namespace egr; // NOLINT

// End-to-end exercise of the EagerUtils static helpers on two 1x1 float CPU
// tensors: lazy meta creation, unsafe access, grad-node wiring (SetHistory)
// and out-rank bookkeeping (SetOutRankWithSlot / OutRankInfo).
TEST(EagerUtils, AutoGradMeta) {
  // Construct Eager Tensor
  pten::DenseTensorMeta meta = pten::DenseTensorMeta(
      pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
  std::shared_ptr<pten::DenseTensor> dt0 = std::make_shared<pten::DenseTensor>(
      std::make_shared<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace()),
      meta);
  dt0->mutable_data<float>()[0] = 10.0;
  EagerTensor et0 = EagerTensor(dt0);

  std::shared_ptr<pten::DenseTensor> dt1 = std::make_shared<pten::DenseTensor>(
      std::make_shared<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace()),
      meta);
  dt1->mutable_data<float>()[0] = 20.0;
  EagerTensor et1 = EagerTensor(dt1);

  // NOTE(review): ets holds copies of et0/et1; the rank checks at the bottom
  // only hold if copies share autograd meta with the originals -- confirm
  // EagerTensor copy semantics.
  std::vector<EagerTensor> ets = {et0, et1};
  auto test_node = std::make_shared<eager_test::GradTestNode>();

  // unsafe_autograd_meta()
  // autograd_meta()
  // multi_autograd_meta()
  // autograd_meta() lazily creates the metas, so the unsafe_ calls below
  // must find them and return non-null.
  AutogradMeta* autograd_meta0 = EagerUtils::autograd_meta(&et0);
  AutogradMeta* autograd_meta1 = EagerUtils::autograd_meta(&et1);

  AutogradMeta* unsafe_autograd_meta_after =
      EagerUtils::unsafe_autograd_meta(et0);
  CHECK_NOTNULL(unsafe_autograd_meta_after);

  std::vector<AutogradMeta*> autograd_metas =
      EagerUtils::multi_autograd_meta(&ets);
  std::vector<AutogradMeta*> unsafe_autograd_metas =
      EagerUtils::unsafe_autograd_meta(&ets);
  CHECK_NOTNULL(unsafe_autograd_metas[0]);
  CHECK_NOTNULL(unsafe_autograd_metas[1]);

  // Set Autograd Meta
  autograd_meta0->SetSingleOutRankWithSlot(0, 1);

  autograd_meta0->SetGradNode(test_node);

  // OutRankInfo()
  // Expect the (slot, rank) = (0, 1) set just above.
  std::pair<size_t, size_t> out_rank_info0 = EagerUtils::OutRankInfo(et0);
  CHECK_EQ(static_cast<int>(out_rank_info0.first), 0);
  CHECK_EQ(static_cast<int>(out_rank_info0.second), 1);

  // grad_node()
  std::shared_ptr<GradNodeBase> grad_node0 = EagerUtils::grad_node(et0);
  CHECK_NOTNULL(grad_node0.get());

  // Both SetHistory overloads should attach test_node to et1's meta.
  EagerUtils::SetHistory(autograd_meta1, test_node);
  EagerUtils::SetHistory({autograd_meta1}, test_node);
  std::shared_ptr<GradNodeBase> grad_node1 = EagerUtils::grad_node(et1);
  CHECK_NOTNULL(grad_node1.get());

  // SetOutRankWithSlot()
  // Single-target overload: rank within the slot is always 0.
  EagerUtils::SetOutRankWithSlot(autograd_meta1, 0);
  std::pair<size_t, size_t> out_rank_info1 = EagerUtils::OutRankInfo(et1);
  CHECK_EQ(static_cast<int>(out_rank_info1.first), 0);
  CHECK_EQ(static_cast<int>(out_rank_info1.second), 0);

  // Vector overload: each meta gets rank equal to its position, so et0 -> 0
  // and et1 -> 1 (checked below via the shared metas of ets).
  EagerUtils::SetOutRankWithSlot(&autograd_metas, 0);
  std::pair<size_t, size_t> out_rank_info2 = EagerUtils::OutRankInfo(et0);
  CHECK_EQ(static_cast<int>(out_rank_info2.first), 0);
  CHECK_EQ(static_cast<int>(out_rank_info2.second), 0);

  std::pair<size_t, size_t> out_rank_info3 = EagerUtils::OutRankInfo(et1);
  CHECK_EQ(static_cast<int>(out_rank_info3.first), 0);
  CHECK_EQ(static_cast<int>(out_rank_info3.second), 1);
}
104 changes: 104 additions & 0 deletions paddle/fluid/eager/utils.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/eager/api/utils/global_utils.h"

#include "paddle/pten/api/all.h"
#include "paddle/pten/common/layout.h"
#include "paddle/pten/core/tensor_meta.h"

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/variable.h"

namespace egr {
/**
* Implementation of Eager Utils.
**/

// Returns target's autograd meta, lazily installing a fresh AutogradMeta on
// the tensor when none exists yet. Never returns nullptr.
AutogradMeta* EagerUtils::autograd_meta(egr::EagerTensor* target) {
  // Fast path: the tensor already carries a meta.
  if (auto* existing = target->get_autograd_meta()) {
    return static_cast<AutogradMeta*>(existing);
  }
  // Slow path: create one and attach it so subsequent lookups succeed.
  auto fresh = std::make_shared<AutogradMeta>();
  target->set_autograd_meta(fresh);
  return fresh.get();
}

// Returns target's autograd meta WITHOUT lazy creation: a missing meta is a
// hard error (hence "unsafe").
AutogradMeta* EagerUtils::unsafe_autograd_meta(const egr::EagerTensor& target) {
  auto* meta = target.get_autograd_meta();
  PADDLE_ENFORCE(meta,
                 paddle::platform::errors::Fatal(
                     "Null autograd_meta gotten from unsafe_autograd_meta()"));
  return static_cast<AutogradMeta*>(meta);
}

// Batched unsafe_autograd_meta(): every tensor must already carry a meta,
// otherwise the per-element call enforces. Order matches *targets.
std::vector<AutogradMeta*> EagerUtils::unsafe_autograd_meta(
    std::vector<egr::EagerTensor>* targets) {
  std::vector<AutogradMeta*> metas;
  // Size is known up front; reserve to avoid reallocations (consistent with
  // multi_autograd_meta below).
  metas.reserve(targets->size());
  for (const egr::EagerTensor& t : *targets) {
    metas.push_back(unsafe_autograd_meta(t));
  }
  return metas;
}

// Batched autograd_meta(): lazily creates a meta for any tensor that lacks
// one, so every returned pointer is non-null. Order matches *targets.
std::vector<AutogradMeta*> EagerUtils::multi_autograd_meta(
    std::vector<egr::EagerTensor>* targets) {
  std::vector<AutogradMeta*> metas;
  metas.reserve(targets->size());

  // Tensors without a meta are tolerated here: autograd_meta() creates one.
  for (egr::EagerTensor& tensor : *targets) {
    metas.push_back(autograd_meta(&tensor));
  }
  return metas;
}

// Returns the (slot_id, rank_in_slot) pair recorded on target's autograd
// meta. Requires the meta to already exist (goes through the unsafe getter).
std::pair<size_t, size_t> EagerUtils::OutRankInfo(
    const egr::EagerTensor& target) {
  return unsafe_autograd_meta(target)->OutRankInfo();
}

// Returns the grad node held by target's autograd meta. Requires the meta to
// already exist; the returned shared_ptr is presumably empty until
// SetGradNode/SetHistory has been called.
std::shared_ptr<GradNodeBase> EagerUtils::grad_node(
    const egr::EagerTensor& target) {
  return unsafe_autograd_meta(target)->GetMutableGradNode();
}

// Records grad_node as the producing backward node on every given meta;
// called during the forward pass to wire up the backward graph.
void EagerUtils::SetHistory(std::vector<AutogradMeta*>* autograd_metas,
                            const std::shared_ptr<GradNodeBase>& grad_node) {
  for (size_t i = 0; i < autograd_metas->size(); ++i) {
    (*autograd_metas)[i]->SetGradNode(grad_node);
  }
}

// Single-meta overload: links one forward var's autograd meta to the
// backward node that produced it.
void EagerUtils::SetHistory(AutogradMeta* autograd_meta,
                            const std::shared_ptr<GradNodeBase>& grad_node) {
  autograd_meta->SetGradNode(grad_node);
}

// Assigns each target the out-rank (slot_id, i) where i is its position in
// the vector, i.e. ranks run 0..targets->size()-1.
void EagerUtils::SetOutRankWithSlot(std::vector<AutogradMeta*>* targets,
                                    size_t slot_id) {
  size_t rank = 0;
  for (AutogradMeta* meta : *targets) {
    meta->SetSingleOutRankWithSlot(slot_id, rank++);
  }
}
// Single-target overload: the rank within the slot is always 0.
void EagerUtils::SetOutRankWithSlot(AutogradMeta* target, size_t slot_id) {
  target->SetSingleOutRankWithSlot(slot_id, 0);
}

} // namespace egr
68 changes: 68 additions & 0 deletions paddle/fluid/eager/utils.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/grad_node_info.h"

#include "paddle/pten/api/all.h"

namespace egr {

/**
 * EagerUtils is a utility class used to do some static conversion or
 * autograd member access; it is designed to be a fully static, functional
 * utils class.
 * **/

class EagerUtils {
 public:
  /**
   * We have to use autograd_meta and multi_autograd_meta to initialize
   * autograd_meta for tensor, since we can't init it in
   * egr::EagerTensor's
   * constructor (it's abstract class there)
   *
   * **/
  // Lazily creates and returns the tensor's autograd meta; never null.
  static AutogradMeta* autograd_meta(egr::EagerTensor* target);

  // Batched autograd_meta(); result order matches *targets.
  static std::vector<AutogradMeta*> multi_autograd_meta(
      std::vector<egr::EagerTensor>* targets);

  // Returns (slot_id, rank_in_slot) from target's autograd meta; the meta
  // must already exist (uses the unsafe getter internally).
  static std::pair<size_t, size_t> OutRankInfo(const egr::EagerTensor& target);

  // Returns the grad node stored on target's autograd meta (meta must exist).
  static std::shared_ptr<GradNodeBase> grad_node(
      const egr::EagerTensor& target);

  // Set history is used to set backward info during forward process, it will
  // set forward var's autograd meta's grad node as current backward node.
  static void SetHistory(std::vector<AutogradMeta*>* autograd_metas,
                         const std::shared_ptr<GradNodeBase>& grad_node);
  static void SetHistory(AutogradMeta* autograd_meta,
                         const std::shared_ptr<GradNodeBase>& grad_node);

  // This is used for Set vector of tensors' rank
  // (vector form: rank i for the i-th target; single form: rank 0).
  static void SetOutRankWithSlot(std::vector<AutogradMeta*>* targets,
                                 size_t slot_id);
  static void SetOutRankWithSlot(AutogradMeta* target, size_t slot_id);

  // This method will return an AutogradMeta pointer unsafely.
  // "Unsafely" = no lazy creation; a missing meta triggers an enforce.
  static AutogradMeta* unsafe_autograd_meta(const egr::EagerTensor& target);
  static std::vector<AutogradMeta*> unsafe_autograd_meta(
      std::vector<egr::EagerTensor>* targets);
};

} // namespace egr

0 comments on commit c8e988f

Please sign in to comment.