diff --git a/python/paddle/device/__init__.py b/python/paddle/device/__init__.py
index f6c3bfc78a9a64..e5679f0efc7700 100644
--- a/python/paddle/device/__init__.py
+++ b/python/paddle/device/__init__.py
@@ -308,7 +308,7 @@ def get_device():

     """
     device = ''
-    place = framework._current_expected_place()
+    place = framework._current_expected_place_()
     if isinstance(place, core.CPUPlace):
         device = 'cpu'
     elif isinstance(place, core.CUDAPlace):
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index d4aba965daf51d..5088cea790fd2c 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -2451,7 +2451,7 @@ def lu(x, pivot=True, get_infos=False, name=None):
            >>> # one can verify : X = P @ L @ U ;

    """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
        lu, p, info = _C_ops.lu(x, pivot)
    else:
        check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu')
@@ -2554,7 +2554,7 @@ def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
        raise ValueError(
            f"The shape of Pivots should be (*, K), but received ndim is [{y.ndim} < 1]"
        )
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
        P, L, U = _C_ops.lu_unpack(x, y, unpack_ludata, unpack_pivots)
        return P, L, U
    else:
@@ -3454,7 +3454,16 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
    else:
        raise RuntimeError("Only support lstsq api for CPU or CUDA device.")

-    if not (x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64)):
+    if not (
+        x.dtype == y.dtype
+        and x.dtype
+        in (
+            paddle.float32,
+            paddle.float64,
+            paddle.base.core.DataType.FLOAT32,
+            paddle.base.core.DataType.FLOAT64,
+        )
+    ):
        raise ValueError(
            "Only support x and y have the same dtype such as 'float32' and 'float64'."
        )
@@ -3475,17 +3484,29 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
        )

    if rcond is None:
-        if x.dtype == paddle.float32:
+        if (
+            x.dtype == paddle.float32
+            or x.dtype == paddle.base.core.DataType.FLOAT32
+        ):
            rcond = 1e-7 * max(x.shape[-2], x.shape[-1])
-        elif x.dtype == paddle.float64:
+        elif (
+            x.dtype == paddle.float64
+            or x.dtype == paddle.base.core.DataType.FLOAT64
+        ):
            rcond = 1e-15 * max(x.shape[-2], x.shape[-1])

-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
        solution, residuals, rank, singular_values = _C_ops.lstsq(
            x, y, rcond, driver
        )
        if driver == "gels":
-            rank = paddle.empty(shape=[0], dtype=paddle.int32)
+            if in_dynamic_mode():
+                rank = paddle.empty(shape=[0], dtype=paddle.int32)
+
+            else:
+                rank = paddle.empty(
+                    shape=[0], dtype=paddle.base.core.DataType.INT32
+                )
            singular_values = paddle.empty(shape=[0], dtype=x.dtype)
        elif driver == "gelsy":
            singular_values = paddle.empty(shape=[0], dtype=x.dtype)
diff --git a/test/legacy_test/test_linalg_lstsq_op.py b/test/legacy_test/test_linalg_lstsq_op.py
index ca9ec7dfb26d51..c54724a16f6fbb 100644
--- a/test/legacy_test/test_linalg_lstsq_op.py
+++ b/test/legacy_test/test_linalg_lstsq_op.py
@@ -19,6 +19,7 @@
 import paddle
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api


 class LinalgLstsqTestCase(unittest.TestCase):
@@ -26,7 +27,7 @@ def setUp(self):
        self.devices = ["cpu"]
        self.init_config()
        if core.is_compiled_with_cuda() and self.driver == "gels":
-            self.devices.append("gpu:0")
+            self.devices.append("gpu")
        self.generate_input()
        self.generate_output()
        np.random.seed(2022)
@@ -91,12 +92,15 @@ def test_eager_dygraph(self):
            self._result_sg_values = results[3].numpy()
            self.assert_np_close()

+    @test_with_pir_api
    def test_static(self):
        paddle.enable_static()
        for dev in self.devices:
            paddle.set_device(dev)
            place = base.CPUPlace() if dev == "cpu" else base.CUDAPlace(0)
-            with base.program_guard(base.Program(), base.Program()):
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
                x = paddle.static.data(
                    name="x",
                    shape=self._input_shape_1,
@@ -112,7 +116,6 @@ def test_static(self):
                )
                exe = base.Executor(place)
                fetches = exe.run(
-                    base.default_main_program(),
                    feed={"x": self._input_data_1, "y": self._input_data_2},
                    fetch_list=[results],
                )
@@ -282,6 +285,7 @@ class TestLinalgLstsqAPIError(unittest.TestCase):
    def setUp(self):
        pass

+    @test_with_pir_api
    def test_api_errors(self):
        def test_x_bad_shape():
            x = paddle.to_tensor(np.random.random(size=(5)), dtype=np.float32)
diff --git a/test/legacy_test/test_lu_op.py b/test/legacy_test/test_lu_op.py
index b875be084fffc2..a140afb1e675f1 100644
--- a/test/legacy_test/test_lu_op.py
+++ b/test/legacy_test/test_lu_op.py
@@ -24,6 +24,7 @@
 import paddle
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api


 def scipy_lu(A, pivot):
@@ -156,10 +157,10 @@ def setUp(self):
        }

    def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)

    def test_check_grad(self):
-        self.check_grad(['X'], ['Out'])
+        self.check_grad(['X'], ['Out'], check_pir=True)


 # m = n 2D
@@ -238,6 +239,7 @@ def run_lu_dygraph(shape, dtype):
        for tensor_shape, dtype in itertools.product(tensor_shapes, dtypes):
            run_lu_dygraph(tensor_shape, dtype)

+    @test_with_pir_api
    def test_static(self):
        paddle.enable_static()

@@ -257,7 +259,9 @@ def run_lu_static(shape, dtype):
            if core.is_compiled_with_cuda():
                places.append(base.CUDAPlace(0))
            for place in places:
-                with base.program_guard(base.Program(), base.Program()):
+                with paddle.static.program_guard(
+                    paddle.static.Program(), paddle.static.Program()
+                ):
                    batch_size = a.size // (a.shape[-1] * a.shape[-2])
                    sP, sl, sU = scipy_lu(a, pivot)
                    sL = np.tril(sl, -1)
@@ -284,7 +288,6 @@ def run_lu_static(shape, dtype):
                    lu, p = paddle.linalg.lu(x, pivot=pivot)
                    exe = base.Executor(place)
                    fetches = exe.run(
-                        base.default_main_program(),
                        feed={"input": a},
                        fetch_list=[lu, p],
                    )
diff --git a/test/legacy_test/test_lu_unpack_op.py b/test/legacy_test/test_lu_unpack_op.py
index 57cabc4872d98f..91888b26d86604 100644
--- a/test/legacy_test/test_lu_unpack_op.py
+++ b/test/legacy_test/test_lu_unpack_op.py
@@ -24,6 +24,7 @@
 import paddle
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api


 def scipy_lu_unpack(A):
@@ -138,7 +139,9 @@ def setUp(self):
            lu = lu.numpy()
            pivots = pivots.numpy()
        else:
-            with base.program_guard(base.Program(), base.Program()):
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
                place = base.CPUPlace()
                if core.is_compiled_with_cuda():
                    place = base.CUDAPlace(0)
@@ -148,7 +151,6 @@ def setUp(self):
                lu, p = paddle.linalg.lu(xv)
                exe = base.Executor(place)
                fetches = exe.run(
-                    base.default_main_program(),
                    feed={"input": x},
                    fetch_list=[lu, p],
                )
@@ -168,7 +170,7 @@ def setUp(self):
        }

    def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)

    def test_check_grad(self):
        self.check_grad(['X'], ['L', 'U'])
@@ -258,6 +260,7 @@ def run_lu_unpack_dygraph(shape, dtype):
        for tensor_shape, dtype in itertools.product(tensor_shapes, dtypes):
            run_lu_unpack_dygraph(tensor_shape, dtype)

+    @test_with_pir_api
    def test_static(self):
        paddle.enable_static()

@@ -275,7 +278,9 @@ def run_lu_static(shape, dtype):
            if core.is_compiled_with_cuda():
                places.append(base.CUDAPlace(0))
            for place in places:
-                with base.program_guard(base.Program(), base.Program()):
+                with paddle.static.program_guard(
+                    paddle.static.Program(), paddle.static.Program()
+                ):
                    sP, sL, sU = scipy_lu_unpack(a)

                    x = paddle.static.data(
@@ -285,7 +290,6 @@ def run_lu_static(shape, dtype):
                    pP, pL, pU = paddle.linalg.lu_unpack(lu, p)
                    exe = base.Executor(place)
                    fetches = exe.run(
-                        base.default_main_program(),
                        feed={"input": a},
                        fetch_list=[pP, pL, pU],
                    )
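
A note on the duplicated dtype checks in `lstsq` above: under the legacy IR a static tensor's `dtype` compares equal to `paddle.float32` (a `VarDesc.VarType` value), while under PIR it is a `paddle.base.core.DataType` value, so the check has to accept both spellings. A minimal sketch of the difference (the printed values are what this patch's checks assume, not a guarantee for every Paddle build):

```python
import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    x = paddle.static.data(name="x", shape=[3, 3], dtype="float32")
    # Legacy IR: x.dtype == paddle.float32 evaluates True.
    # PIR: x.dtype is paddle.base.core.DataType.FLOAT32 instead,
    # which is why lstsq now checks both representations.
    print(x.dtype)
```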
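
For context, here is a minimal, self-contained sketch of the static-graph `lstsq` call the updated tests exercise (shapes and tensor names are made up for illustration). Under `@test_with_pir_api` the decorated test body runs once against the legacy IR and once against PIR:

```python
import numpy as np

import paddle

paddle.enable_static()

x_np = np.random.rand(5, 4).astype("float32")
y_np = np.random.rand(5, 3).astype("float32")

with paddle.static.program_guard(
    paddle.static.Program(), paddle.static.Program()
):
    x = paddle.static.data(name="x", shape=[5, 4], dtype="float32")
    y = paddle.static.data(name="y", shape=[5, 3], dtype="float32")
    results = paddle.linalg.lstsq(x, y, driver="gels")

    exe = paddle.static.Executor(paddle.CPUPlace())
    # No explicit program argument: exe.run() falls back to the default
    # program installed by program_guard. That is presumably why the
    # tests above drop base.default_main_program() from exe.run() --
    # under PIR the legacy default program object would be the wrong one.
    solution, residuals, rank, sv = exe.run(
        feed={"x": x_np, "y": y_np},
        fetch_list=[results],
    )
```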