Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refine the format of printing tensor #27673

Merged
merged 12 commits into from
Oct 13, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions paddle/fluid/framework/tensor_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,14 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/tensor_util.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/profiler.h"

Expand Down Expand Up @@ -937,6 +940,12 @@ void TensorFromDLPack(const ::DLTensor& dl_tensor, framework::Tensor* dst) {
#endif
}

// Placeholder for PrintOptions-aware tensor formatting; currently returns a
// fixed marker string regardless of input.
// TODO(zhiqiu): use the print option to format tensor.
template <typename T>
std::string format_tensor(const framework::Tensor& tensor) {
  static const std::string kUnimplemented{"NOT IMPLEMENTED"};
  return kUnimplemented;
}

template <typename T>
std::ostream& print_tensor(std::ostream& os, const framework::Tensor& tensor) {
auto inspect = tensor.data<T>();
Expand Down
20 changes: 20 additions & 0 deletions paddle/fluid/framework/tensor_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,26 @@ limitations under the License. */
namespace paddle {
namespace framework {

// Process-wide settings controlling how tensors are rendered when printed.
// Field names mirror numpy/torch `set_printoptions`. Implemented as a Meyers
// singleton: construction is lazy and thread-safe since C++11.
class PrintOptions {
 public:
  // Accessor for the single shared instance.
  static PrintOptions& Instance() {
    static PrintOptions instance;
    return instance;
  }
  ~PrintOptions() = default;
  PrintOptions(const PrintOptions& o) = delete;
  const PrintOptions& operator=(const PrintOptions& o) = delete;

  int precision = 8;     // digits printed per floating-point element
  int threshold = 1000;  // element count that triggers summarized output
  int edgeitems = 3;     // items shown at each edge when summarizing
  int linewidth = 75;    // target maximum characters per printed line
  bool sci_mode = false;  // use scientific notation when true

 private:
  PrintOptions() = default;
};

// NOTE(zcd): Because TensorCopy is an async operation, when the src_place
// and dst_place are two different GPU, to ensure that the operation can
// be carried out correctly, there is a src_ctx wait operation in TensorCopy.
Expand Down
8 changes: 5 additions & 3 deletions paddle/fluid/imperative/tests/test_tracer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,12 @@
//

#include <paddle/fluid/framework/op_registry.h>

#include <memory>
#include <set>
#include <string>
#include <vector>

#include "gtest/gtest.h"
#include "paddle/fluid/imperative/basic_engine.h"
#include "paddle/fluid/imperative/tracer.h"
Expand Down Expand Up @@ -286,9 +288,9 @@ TEST(test_tracer, test_unique_name_generator) {
ASSERT_STREQ("fc_1", fc_2.c_str());
// use `dygraph_tmp` as the key if it is not specified.
auto tmp_var_2 = tracer.GenerateUniqueName();
ASSERT_STREQ("eager_tmp_2", tmp_var_2.c_str());
auto tmp_var_3 = tracer.GenerateUniqueName("eager_tmp");
ASSERT_STREQ("eager_tmp_3", tmp_var_3.c_str());
ASSERT_STREQ("dygraph_tmp_2", tmp_var_2.c_str());
auto tmp_var_3 = tracer.GenerateUniqueName("dygraph_tmp");
ASSERT_STREQ("dygraph_tmp_3", tmp_var_3.c_str());
}

TEST(test_tracer, test_current_tracer) {
Expand Down
5 changes: 3 additions & 2 deletions paddle/fluid/imperative/tracer.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
#include <string>
#include <unordered_map>
#include <vector>

#include "ThreadPool.h"
#include "paddle/fluid/imperative/basic_engine.h"
#include "paddle/fluid/imperative/jit/program_desc_tracer.h"
Expand All @@ -32,7 +33,7 @@ namespace imperative {
class UniqueNameGenerator {
public:
explicit UniqueNameGenerator(std::string prefix = "") : prefix_(prefix) {}
std::string Generate(std::string key = "eager_tmp") {
std::string Generate(std::string key = "dygraph_tmp") {
return prefix_ + key + "_" + std::to_string(id_++);
}

Expand Down Expand Up @@ -83,7 +84,7 @@ class Tracer {
// name like `tmp_0` in some cases when transforming dygraph into static layers.
// So we modify the default prefix key into `dygraph_tmp` to distinguish with
// static graph.
std::string GenerateUniqueName(std::string key = "eager_tmp") {
std::string GenerateUniqueName(std::string key = "dygraph_tmp") {
return generator_->Generate(key);
}

Expand Down
8 changes: 7 additions & 1 deletion paddle/fluid/pybind/imperative.cc
Original file line number Diff line number Diff line change
Expand Up @@ -833,6 +833,12 @@ void BindImperative(py::module *m_ptr) {
.def_property_readonly(
"place", [](imperative::VarBase &self) { return self.Place(); },
py::return_value_policy::copy)
.def_property_readonly("_place_str",
[](imperative::VarBase &self) {
std::stringstream ostr;
ostr << self.Place();
return ostr.str();
})
.def_property_readonly("type", &imperative::VarBase::Type)
.def_property_readonly("dtype", &imperative::VarBase::DataType);

Expand Down Expand Up @@ -890,7 +896,7 @@ void BindImperative(py::module *m_ptr) {
&imperative::Tracer::GetProgramDescTracer,
py::return_value_policy::reference)
.def("_generate_unique_name", &imperative::Tracer::GenerateUniqueName,
py::arg("key") = "eager_tmp")
py::arg("key") = "dygraph_tmp")
.def(
"_set_amp_op_list",
[](imperative::Tracer &self,
Expand Down
29 changes: 29 additions & 0 deletions paddle/fluid/pybind/pybind.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ limitations under the License. */
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
Expand All @@ -45,6 +46,7 @@ limitations under the License. */
#include "paddle/fluid/framework/save_load_util.h"
#include "paddle/fluid/framework/scope_pool.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/version.h"
Expand Down Expand Up @@ -427,6 +429,31 @@ PYBIND11_MODULE(core_noavx, m) {
return op_compatible_map.ConvertToProto(desc.OpCompatibleMap());
});

m.def("set_printoptions", [](const py::kwargs &kwargs) {
  // Update the process-wide PrintOptions singleton from Python keyword
  // arguments; any key not supplied keeps its previous value.
  auto &opts = framework::PrintOptions::Instance();
  const auto maybe_set_int = [&kwargs](const char *name, int *field) {
    if (kwargs.contains(name)) {
      *field = kwargs[name].cast<int>();
    }
  };
  maybe_set_int("precision", &opts.precision);
  maybe_set_int("threshold", &opts.threshold);
  maybe_set_int("edgeitems", &opts.edgeitems);
  maybe_set_int("linewidth", &opts.linewidth);
  if (kwargs.contains("sci_mode")) {
    opts.sci_mode = kwargs["sci_mode"].cast<bool>();
  }

  VLOG(4) << "Set printoptions: precision=" << opts.precision
          << ", threshold=" << opts.threshold
          << ", edgeitems=" << opts.edgeitems
          << ", linewidth=" << opts.linewidth
          << ", sci_mode=" << opts.sci_mode;
});

m.def(
"_append_python_callable_object_and_return_id",
[](py::object py_obj) -> size_t {
Expand Down Expand Up @@ -616,6 +643,8 @@ PYBIND11_MODULE(core_noavx, m) {
.def("_get_double_element", TensorGetElement<double>)
.def("_place", [](Tensor &self) { return self.place(); })
.def("_dtype", [](Tensor &self) { return self.type(); })
.def("_layout",
[](Tensor &self) { return DataLayoutToString(self.layout()); })
.def("_share_data_with", &Tensor::ShareDataWith)
.def("__getitem__", PySliceTensor, py::return_value_policy::reference)
.def("__str__", [](const Tensor &self) {
Expand Down
3 changes: 3 additions & 0 deletions python/paddle/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,9 @@
from .tensor.search import index_select #DEFINE_ALIAS
from .tensor.search import nonzero #DEFINE_ALIAS
from .tensor.search import sort #DEFINE_ALIAS

from .tensor.to_string import set_printoptions

from .framework.random import manual_seed #DEFINE_ALIAS
from .framework.random import get_cuda_rng_state #DEFINE_ALIAS
from .framework.random import set_cuda_rng_state #DEFINE_ALIAS
Expand Down
21 changes: 7 additions & 14 deletions python/paddle/fluid/dygraph/varbase_patch_methods.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,22 +228,15 @@ def __str__(self):
.. code-block:: python

import paddle
paddle.disable_static()
x = paddle.rand([1, 5])
x = paddle.rand([2, 5])
print(x)
# Variable: eager_tmp_0
# - place: CUDAPlace(0)
# - shape: [1, 5]
# - layout: NCHW
# - dtype: float
# - data: [0.645307 0.597973 0.732793 0.646921 0.540328]
paddle.enable_static()

# Tensor(shape=[2, 5], dtype=float32, place=CPUPlace,
# [[0.30574632, 0.55739117, 0.30902600, 0.39413780, 0.44830436],
# [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
"""
tensor = self.value().get_tensor()
if tensor._is_initialized():
return 'Tensor: %s\n%s' % (self.name, str(tensor))
else:
return 'Tensor: %s, not initialized' % (self.name)
from paddle.tensor.to_string import to_string
return to_string(self)

@property
def block(self):
Expand Down
4 changes: 2 additions & 2 deletions python/paddle/fluid/framework.py
Original file line number Diff line number Diff line change
Expand Up @@ -5263,8 +5263,8 @@ def __str__(self):
# - data: [...]
paddle.enable_static()
"""
return "Parameter containing:\n {}\n - stop_gradient: {}".format(
super(ParamBase, self).__str__(), self.stop_gradient)
return "Parameter containing:\n{tensor}".format(
tensor=super(ParamBase, self).__str__())

__repr__ = __str__

Expand Down
6 changes: 3 additions & 3 deletions python/paddle/fluid/tests/unittests/test_unique_name.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,10 @@ def test_name_generator(self):
with fluid.dygraph.guard():
tracer = fluid.framework._dygraph_tracer()
tmp_var_0 = tracer._generate_unique_name()
self.assertEqual(tmp_var_0, "eager_tmp_0")
self.assertEqual(tmp_var_0, "dygraph_tmp_0")

tmp_var_1 = tracer._generate_unique_name("eager_tmp")
self.assertEqual(tmp_var_1, "eager_tmp_1")
tmp_var_1 = tracer._generate_unique_name("dygraph_tmp")
self.assertEqual(tmp_var_1, "dygraph_tmp_1")


if __name__ == '__main__':
Expand Down
30 changes: 30 additions & 0 deletions python/paddle/fluid/tests/unittests/test_var_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -404,6 +404,36 @@ def _assert_to_static(self, var_base, static_var, is_param=False):

self.assertListEqual(list(var_base.shape), list(static_var.shape))

def test_tensor_str(self):
    # Verify that Tensor.__str__ honors paddle.set_printoptions
    # (precision=4, threshold=100, edgeitems=3) and produces summarized,
    # deterministic output under a fixed seed.
    paddle.disable_static(paddle.CPUPlace())
    paddle.manual_seed(10)
    a = paddle.rand([10, 20])
    paddle.set_printoptions(4, 100, 3)
    a_str = str(a)

    if six.PY2:
        # Python 2 renders shape dims as longs (10L, 20L).
        # NOTE(review): the data rows below may have lost leading alignment
        # spaces in this copy — confirm against actual print output.
        expected = '''Tensor(shape=[10L, 20L], dtype=float32, place=CPUPlace, stop_gradient=True,
[[0.2727, 0.5489, 0.8655, ..., 0.2916, 0.8525, 0.9000],
[0.3806, 0.8996, 0.0928, ..., 0.9535, 0.8378, 0.6409],
[0.1484, 0.4038, 0.8294, ..., 0.0148, 0.6520, 0.4250],
...,
[0.3426, 0.1909, 0.7240, ..., 0.4218, 0.2676, 0.5679],
[0.5561, 0.2081, 0.0676, ..., 0.9778, 0.3302, 0.9559],
[0.2665, 0.8483, 0.5389, ..., 0.4956, 0.6862, 0.9178]])'''

    else:
        expected = '''Tensor(shape=[10, 20], dtype=float32, place=CPUPlace, stop_gradient=True,
[[0.2727, 0.5489, 0.8655, ..., 0.2916, 0.8525, 0.9000],
[0.3806, 0.8996, 0.0928, ..., 0.9535, 0.8378, 0.6409],
[0.1484, 0.4038, 0.8294, ..., 0.0148, 0.6520, 0.4250],
...,
[0.3426, 0.1909, 0.7240, ..., 0.4218, 0.2676, 0.5679],
[0.5561, 0.2081, 0.0676, ..., 0.9778, 0.3302, 0.9559],
[0.2665, 0.8483, 0.5389, ..., 0.4956, 0.6862, 0.9178]])'''

    self.assertEqual(a_str, expected)
    # Restore static mode so later tests are unaffected.
    paddle.enable_static()


class TestVarBaseSetitem(unittest.TestCase):
def setUp(self):
Expand Down
1 change: 1 addition & 0 deletions python/paddle/tensor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,3 +194,4 @@
# from .tensor import Tensor #DEFINE_ALIAS
# from .tensor import LoDTensor #DEFINE_ALIAS
# from .tensor import LoDTensorArray #DEFINE_ALIAS
from .to_string import set_printoptions
Loading