From 1d1ef9c5f283b2b7b4fb54b0e21e93c24a85d92f Mon Sep 17 00:00:00 2001
From: jiweibo
Date: Fri, 15 Oct 2021 09:19:02 +0000
Subject: [PATCH] update

---
 paddle/fluid/inference/tensorrt/engine.cc | 10 ++++++++++
 paddle/fluid/pybind/inference_api.cc      |  9 +++++----
 python/paddle/fluid/inference/wrapper.py  | 15 +++++++++++++++
 3 files changed, 30 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index d075656d15747c..f79d3e18afa0f0 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -196,6 +196,16 @@ void TensorRTEngine::FreezeNetwork() {
 #if IS_TRT_VERSION_GE(6000)
     LOG(INFO) << "Run Paddle-TRT Dynamic Shape mode.";
     for (auto &input : min_input_shape_) {
+      if (!(std::all_of(input.second.begin(), input.second.end(),
+                        [](int x) { return x > 0; }) &&
+            std::all_of(max_input_shape_[input.first].begin(),
+                        max_input_shape_[input.first].end(),
+                        [](int x) { return x > 0; }) &&
+            std::all_of(optim_input_shape_[input.first].begin(),
+                        optim_input_shape_[input.first].end(),
+                        [](int x) { return x > 0; }))) {
+        continue;
+      }
       VLOG(4) << "TRT dynamic_shape set " << input.first
               << " min: " << Vec2Str(input.second)
               << ", max: " << Vec2Str(max_input_shape_[input.first])
diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc
index 8ce7bea2d8e703..3dcc113c8d13f0 100644
--- a/paddle/fluid/pybind/inference_api.cc
+++ b/paddle/fluid/pybind/inference_api.cc
@@ -739,10 +739,11 @@ void BindZeroCopyTensor(py::module *m) {
 void BindPaddleInferTensor(py::module *m) {
   py::class_<paddle_infer::Tensor>(*m, "PaddleInferTensor")
       .def("reshape", &paddle_infer::Tensor::Reshape)
-      .def("copy_from_cpu", &PaddleInferTensorCreate<int32_t>)
-      .def("copy_from_cpu", &PaddleInferTensorCreate<int64_t>)
-      .def("copy_from_cpu", &PaddleInferTensorCreate<float>)
-      .def("copy_from_cpu", &PaddleInferTensorCreate<paddle_infer::float16>)
+      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int32_t>)
+      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int64_t>)
+      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<float>)
+      .def("copy_from_cpu_bind",
+           &PaddleInferTensorCreate<paddle_infer::float16>)
       .def("copy_to_cpu", &PaddleInferTensorToNumpy)
       .def("shape", &paddle_infer::Tensor::shape)
       .def("set_lod", &paddle_infer::Tensor::SetLoD)
diff --git a/python/paddle/fluid/inference/wrapper.py b/python/paddle/fluid/inference/wrapper.py
index 96885edcc5e822..2c1b2c77504d92 100644
--- a/python/paddle/fluid/inference/wrapper.py
+++ b/python/paddle/fluid/inference/wrapper.py
@@ -15,9 +15,24 @@
 from ..core import AnalysisConfig, PaddleDType, PaddlePlace
 from ..core import PaddleInferPredictor, PaddleInferTensor
 
+import numpy as np
+
 DataType = PaddleDType
 PlaceType = PaddlePlace
 PrecisionType = AnalysisConfig.Precision
 Config = AnalysisConfig
 Tensor = PaddleInferTensor
 Predictor = PaddleInferPredictor
+
+
+def tensor_copy_from_cpu(self, data):
+    '''
+    Support input type check based on tensor.copy_from_cpu.
+    '''
+    if not isinstance(data, np.ndarray):
+        raise TypeError(
+            "In copy_from_cpu, we only support numpy ndarray data type.")
+    self.copy_from_cpu_bind(data)
+
+
+Tensor.copy_from_cpu = tensor_copy_from_cpu
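
Usage note (not part of the patch): the sketch below illustrates the Python-side effect of this change, where Tensor.copy_from_cpu now rejects anything other than a numpy ndarray before forwarding the data to the renamed C++ binding copy_from_cpu_bind. The model path, input shape, and dummy data are placeholder assumptions for illustration; only the copy_from_cpu behavior and the TypeError message come from the patch itself.

import numpy as np
import paddle.inference as paddle_infer

# Placeholder model files; substitute a real inference model.
config = paddle_infer.Config("./model/inference.pdmodel",
                             "./model/inference.pdiparams")
predictor = paddle_infer.create_predictor(config)

input_name = predictor.get_input_names()[0]
input_tensor = predictor.get_input_handle(input_name)

# With the check added in wrapper.py, a plain Python list is rejected
# before it ever reaches copy_from_cpu_bind.
try:
    input_tensor.copy_from_cpu([[1.0, 2.0, 3.0]])
except TypeError as err:
    print(err)  # In copy_from_cpu, we only support numpy ndarray data type.

# A numpy ndarray is forwarded to copy_from_cpu_bind as before.
data = np.ones((1, 3, 224, 224), dtype=np.float32)
input_tensor.reshape([1, 3, 224, 224])
input_tensor.copy_from_cpu(data)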