Commit: refine

wanghuancoder committed Nov 18, 2021
1 parent 2119c06 commit 9b476a0
Showing 4 changed files with 51 additions and 55 deletions.
77 changes: 36 additions & 41 deletions paddle/fluid/pybind/eager.cc
@@ -32,13 +32,13 @@ namespace pybind {

namespace py = ::pybind11;

PyTypeObject* pEagerTensorType;
PyTypeObject* p_eager_tensor_type;

PyObject* eagertensor_new(PyTypeObject* type, PyObject* args,
PyObject* kwargs) {
PyObject* obj = type->tp_alloc(type, 0);
if (obj) {
auto v = (EagerTensorObject*)obj; // NOLINT
auto v = reinterpret_cast<EagerTensorObject*>(obj);
new (&(v->eagertensor)) egr::EagerTensor();
}
return obj;
@@ -49,16 +49,11 @@ static void eagertensor_dealloc(EagerTensorObject* self) {
Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
}

static int eagertensor_init(EagerTensorObject* self, PyObject* args,
PyObject* kwargs) {
return 0;
}

extern struct PyGetSetDef variable_properties[];

extern PyMethodDef variable_methods[];

PyTypeObject EagerTensorType = {
PyTypeObject eager_tensor_type = {
PyVarObject_HEAD_INIT(NULL, 0) "core_avx.eager.EagerTensor", /* tp_name */
sizeof(EagerTensorObject), /* tp_basicsize */
0, /* tp_itemsize */
@@ -78,50 +73,50 @@ PyTypeObject EagerTensorType = {
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE |
Py_TPFLAGS_HEAPTYPE, /* tp_flags */
0, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
variable_methods, /* tp_methods */
0, /* tp_members */
variable_properties, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
(initproc)eagertensor_init, /* tp_init */
0, /* tp_alloc */
eagertensor_new, /* tp_new */
0, /* tp_free */
0, /* tp_is_gc */
0, /* tp_bases */
0, /* tp_mro */
0, /* tp_cache */
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
0 /* tp_version_tag */
Py_TPFLAGS_HEAPTYPE, /* tp_flags */
0, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
variable_methods, /* tp_methods */
0, /* tp_members */
variable_properties, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
eagertensor_new, /* tp_new */
0, /* tp_free */
0, /* tp_is_gc */
0, /* tp_bases */
0, /* tp_mro */
0, /* tp_cache */
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
0 /* tp_version_tag */
};

void BindEager(pybind11::module* module) {
auto m = module->def_submodule("eager");

pEagerTensorType = &EagerTensorType;
if (PyType_Ready(&EagerTensorType) < 0) {
p_eager_tensor_type = &eager_tensor_type;
if (PyType_Ready(&eager_tensor_type) < 0) {
PADDLE_THROW(platform::errors::Fatal(
"Init Paddle erroe in BindEager(PyType_Ready)."));
return;
}

Py_INCREF(&EagerTensorType);
Py_INCREF(&eager_tensor_type);
if (PyModule_AddObject(m.ptr(), "EagerTensor",
reinterpret_cast<PyObject*>(&EagerTensorType)) < 0) {
Py_DECREF(&EagerTensorType);
reinterpret_cast<PyObject*>(&eager_tensor_type)) < 0) {
Py_DECREF(&eager_tensor_type);
Py_DECREF(m.ptr());
PADDLE_THROW(platform::errors::Fatal(
"Init Paddle erroe in BindEager(PyModule_AddObject)."));
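
The hunk above follows the usual CPython extension-type recipe: tp_new allocates the object through tp_alloc and constructs the wrapped egr::EagerTensor with placement new, tp_dealloc destroys it before freeing, and BindEager registers the type on the submodule with PyType_Ready plus PyModule_AddObject. The block below is a minimal, self-contained sketch of that same idiom with a plain std::string payload; every name in it (DemoObject, demo_type, demo_new, BindDemo) is hypothetical and illustrates the pattern only, not Paddle's actual API.

#include <Python.h>
#include <memory>
#include <new>
#include <string>

struct DemoObject {
  PyObject_HEAD
  std::string payload;  // stands in for the wrapped egr::EagerTensor
};

static PyObject* demo_new(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
  PyObject* obj = type->tp_alloc(type, 0);
  if (obj) {
    auto v = reinterpret_cast<DemoObject*>(obj);
    new (&(v->payload)) std::string();  // placement new: tp_alloc only zero-fills the struct
  }
  return obj;
}

static void demo_dealloc(DemoObject* self) {
  std::destroy_at(&self->payload);  // run the C++ destructor before freeing the object
  Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
}

static PyTypeObject demo_type = {
    PyVarObject_HEAD_INIT(nullptr, 0) "demo.Demo", /* tp_name */
    sizeof(DemoObject),                            /* tp_basicsize */
};

void BindDemo(PyObject* m) {
  demo_type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
  demo_type.tp_new = demo_new;
  demo_type.tp_dealloc = reinterpret_cast<destructor>(demo_dealloc);
  if (PyType_Ready(&demo_type) < 0) {
    return;  // the real code reports a fatal error here
  }
  Py_INCREF(&demo_type);
  if (PyModule_AddObject(m, "Demo", reinterpret_cast<PyObject*>(&demo_type)) < 0) {
    Py_DECREF(&demo_type);  // AddObject did not steal the reference on failure
  }
}
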
6 changes: 3 additions & 3 deletions paddle/fluid/pybind/eager_functions.cc
@@ -77,7 +77,7 @@ bool check_numpy_available() {
return ret;
}

extern PyTypeObject* pEagerTensorType;
extern PyTypeObject* p_eager_tensor_type;

static PyObject* eager_api_set_expected_place(PyObject* self, PyObject* args,
PyObject* kwargs) {
@@ -192,9 +192,9 @@ static inline PyObject* eager_api_numpy_to_tensor(PyObject* numpy_data,
std::shared_ptr<pten::DenseTensor> densetensor(
new pten::DenseTensor(std::move(shared_storage), std::move(meta)));

PyObject* obj = pEagerTensorType->tp_alloc(pEagerTensorType, 0);
PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
if (obj) {
auto v = (EagerTensorObject*)obj; // NOLINT
auto v = reinterpret_cast<EagerTensorObject*>(obj);
new (&(v->eagertensor)) egr::EagerTensor();
v->eagertensor.set_impl(densetensor);
v->eagertensor.set_name(egr::Controller::Instance().GenerateUniqueName());
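
eager_api_numpy_to_tensor above allocates a fresh Python object through the exported type's tp_alloc slot, placement-news the eagertensor member, and only then fills it in (set_impl, set_name). A hedged sketch of that wrap-a-C++-value pattern, reusing the hypothetical DemoObject/demo_type names from the sketch after eager.cc and assuming PyType_Ready has already run so tp_alloc is populated:

static PyObject* WrapPayload(const std::string& value) {
  PyObject* obj = demo_type.tp_alloc(&demo_type, 0);
  if (obj) {
    auto v = reinterpret_cast<DemoObject*>(obj);
    new (&(v->payload)) std::string();  // construct the member first
    v->payload = value;                 // then fill it in, mirroring set_impl/set_name
  }
  return obj;  // nullptr (with a Python error set) if allocation failed
}
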
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/eager_properties.cc
@@ -46,7 +46,7 @@ int init_numpy_p() {
}
static const int numpy_initialized_m = init_numpy_p();

extern PyTypeObject* pEagerTensorType;
extern PyTypeObject* p_eager_tensor_type;

PyObject* eager_tensor_properties_get_name(EagerTensorObject* self,
void* closure) {
21 changes: 11 additions & 10 deletions paddle/fluid/pybind/eager_utils.cc
@@ -28,7 +28,7 @@ limitations under the License. */
namespace paddle {
namespace pybind {

extern PyTypeObject* pEagerTensorType;
extern PyTypeObject* p_eager_tensor_type;

bool PyObject_CheckLongOrConvertToLong(PyObject** obj) {
if ((PyLong_Check(*obj) && !PyBool_Check(*obj))) {
@@ -49,7 +49,7 @@ bool PyObject_CheckLongOrConvertToLong(PyObject** obj) {

bool PyObject_CheckFloatOrConvertToFloat(PyObject** obj) {
// sometimes users provide PyLong or numpy.int64 but attr is float
if (PyFloat_Check(*obj) || PyLong_Check(*obj)) { // NOLINT
if (PyFloat_Check(*obj) || PyLong_Check(*obj)) {
return true;
}
if (std::string((reinterpret_cast<PyTypeObject*>((*obj)->ob_type))->tp_name)
@@ -94,7 +94,7 @@ int CastPyArg2AttrInt(PyObject* obj, ssize_t arg_pos) {

int64_t CastPyArg2AttrLong(PyObject* obj, ssize_t arg_pos) {
if (PyObject_CheckLongOrConvertToLong(&obj)) {
return (int64_t)PyLong_AsLong(obj); // NOLINT
return reinterpret_cast<int64_t>(PyLong_AsLong(obj));
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
@@ -130,7 +130,8 @@ std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos) {
}

egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos) {
if (PyObject_IsInstance(obj, reinterpret_cast<PyObject*>(pEagerTensorType))) {
if (PyObject_IsInstance(obj,
reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
return reinterpret_cast<EagerTensorObject*>(obj)->eagertensor;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
@@ -148,8 +149,8 @@ std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
PyObject* item = nullptr;
for (Py_ssize_t i = 0; i < len; i++) {
item = PyList_GetItem(obj, i);
if (PyObject_IsInstance(item,
reinterpret_cast<PyObject*>(pEagerTensorType))) {
if (PyObject_IsInstance(
item, reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
result.emplace_back(
reinterpret_cast<EagerTensorObject*>(item)->eagertensor);
} else {
@@ -165,8 +166,8 @@ std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
PyObject* item = nullptr;
for (Py_ssize_t i = 0; i < len; i++) {
item = PyTuple_GetItem(obj, i);
if (PyObject_IsInstance(item,
reinterpret_cast<PyObject*>(pEagerTensorType))) {
if (PyObject_IsInstance(
item, reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
result.emplace_back(
reinterpret_cast<EagerTensorObject*>(item)->eagertensor);
} else {
@@ -211,7 +212,7 @@ PyObject* ToPyObject(const std::string& value) {
}

PyObject* ToPyObject(const egr::EagerTensor& value) {
PyObject* obj = pEagerTensorType->tp_alloc(pEagerTensorType, 0);
PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
if (obj) {
auto v = reinterpret_cast<EagerTensorObject*>(obj);
new (&(v->eagertensor)) egr::EagerTensor();
@@ -277,7 +278,7 @@ PyObject* ToPyObject(const std::vector<egr::EagerTensor>& value) {
PyObject* result = PyList_New((Py_ssize_t)value.size());

for (size_t i = 0; i < value.size(); i++) {
PyObject* obj = pEagerTensorType->tp_alloc(pEagerTensorType, 0);
PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
if (obj) {
auto v = reinterpret_cast<EagerTensorObject*>(obj);
new (&(v->eagertensor)) egr::EagerTensor();
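
CastPyArg2VectorOfEagerTensor above walks a Python list or tuple, checks each borrowed item with PyObject_IsInstance against the exported type, and collects the wrapped C++ values into a std::vector, throwing InvalidArgument otherwise. Below is a rough sketch of the same conversion pattern under the hypothetical DemoObject/demo_type names used in the earlier sketches, with a bool return standing in for PADDLE_THROW; it assumes the includes from the first sketch plus <vector>.

#include <vector>

static bool CastPyArg2VectorOfPayload(PyObject* obj, std::vector<std::string>* out) {
  const bool is_list = PyList_Check(obj);
  if (!is_list && !PyTuple_Check(obj)) {
    return false;  // the real code raises InvalidArgument for other argument types
  }
  Py_ssize_t len = is_list ? PyList_Size(obj) : PyTuple_Size(obj);
  out->reserve(static_cast<size_t>(len));
  for (Py_ssize_t i = 0; i < len; ++i) {
    // PyList_GetItem / PyTuple_GetItem return borrowed references, so no Py_DECREF.
    PyObject* item = is_list ? PyList_GetItem(obj, i) : PyTuple_GetItem(obj, i);
    if (PyObject_IsInstance(item, reinterpret_cast<PyObject*>(&demo_type)) != 1) {
      return false;  // not an instance of the exported type (or an error occurred)
    }
    out->emplace_back(reinterpret_cast<DemoObject*>(item)->payload);
  }
  return true;
}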
