From 98303291d27cb831b19111d82793159cbe9a85ca Mon Sep 17 00:00:00 2001
From: hong <43953930+phlrain@users.noreply.github.com>
Date: Fri, 1 Apr 2022 08:52:17 +0800
Subject: [PATCH 1/6] Add basic yaml backward (#40751)

* fix error; test=develop
* update
* close some yaml
* fix backward attrite error; test=develop
* add div test
* polish code; test=develop
* update
* update
* fix bug
* update bitwise code; test=develop
* update
* update
* fix some bug
* update
* revert cmakelist
* fix optional bug;
* fix bug
* fix bug;
* add backward test
* open bn
* update
* update
* revert eager_gen
* polish code
* fix topk error
* update
* update
* fix bug;
* move label smooth, nll loss
* revert topk
* fix topk label smooth bug;
* remove batch_norm
* remove topk
* change flip infer meta
* fix flip bug
* update yaml
* close abs
* fix histogram bug
* fix histogram bug
* add abs
* fix histogram kernel
* remove expand
---
 .../kernels/cpu/index_sample_grad_kernel.cc   |   2 +-
 .../kernels/cpu/masked_select_grad_kernel.cc  |   2 +-
 .../phi/kernels/cpu/nll_loss_grad_kernel.cc   |   2 +-
 paddle/phi/kernels/cpu/top_k_grad_kernel.cc   |   6 +-
 .../kernels/gpu/index_sample_grad_kernel.cu   |   4 +-
 .../kernels/gpu/masked_select_grad_kernel.cu  |   2 +-
 .../phi/kernels/gpu/nll_loss_grad_kernel.cu   |   2 +-
 paddle/phi/kernels/gpu/top_k_grad_kernel.cu   |   6 +-
 paddle/phi/kernels/histogram_kernel.h         |  12 +-
 paddle/phi/kernels/index_sample_grad_kernel.h |   2 +-
 .../phi/kernels/masked_select_grad_kernel.h   |   2 +-
 paddle/phi/kernels/nll_loss_grad_kernel.h     |   2 +-
 paddle/phi/kernels/top_k_grad_kernel.h        |   5 +-
 paddle/phi/ops/compat/index_sample_sig.cc     |   2 +-
 paddle/phi/ops/compat/masked_select_sig.cc    |   2 +-
 paddle/phi/ops/compat/nll_loss_sig.cc         |   2 +-
 paddle/phi/ops/compat/top_k_sig.cc            |   2 +-
 python/paddle/fluid/layers/nn.py              |  11 +-
 .../tests/unittests/test_batch_norm_op_v2.py  |  27 ++--
 .../unittests/test_elementwise_div_op.py      |  19 ++-
 .../tests/unittests/test_expand_as_v2_op.py   |   1 +
 .../tests/unittests/test_histogram_op.py      |  13 +-
 .../tests/unittests/test_index_sample_op.py   |   4 +-
 .../tests/unittests/test_isfinite_v2_op.py    |  16 +++
 .../fluid/tests/unittests/test_lerp_op.py     |   5 +-
 .../fluid/tests/unittests/test_logical_op.py  |  20 +++
 .../tests/unittests/test_masked_select_op.py  |   6 +-
 .../fluid/tests/unittests/test_nll_loss.py    | 108 ++++++++-------
 .../fluid/tests/unittests/test_top_k_op.py    |   2 +
 .../fluid/tests/unittests/test_top_k_v2_op.py | 125 ++++++++++--------
 .../tests/unittests/test_viterbi_decode_op.py |   8 +-
 .../fluid/tests/unittests/test_yolo_box_op.py |   5 +-
 python/paddle/nn/functional/common.py         |   4 +-
 python/paddle/nn/functional/loss.py           |   5 +-
 python/paddle/nn/functional/norm.py           |   2 +-
 python/paddle/tensor/linalg.py                |   5 +-
 python/paddle/tensor/logic.py                 |   9 ++
 python/paddle/tensor/manipulation.py          |   4 +-
 python/paddle/tensor/math.py                  |  20 ++-
 python/paddle/tensor/search.py                |  11 +-
 python/paddle/text/viterbi_decode.py          |   6 +-
 python/paddle/utils/code_gen/api.yaml         |   9 ++
 python/paddle/vision/ops.py                   |   8 +-
 43 files changed, 344 insertions(+), 166 deletions(-)

diff --git a/paddle/phi/kernels/cpu/index_sample_grad_kernel.cc b/paddle/phi/kernels/cpu/index_sample_grad_kernel.cc
index 006711ceef75ed..d060e8c9b28370 100644
--- a/paddle/phi/kernels/cpu/index_sample_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/index_sample_grad_kernel.cc
@@ -68,9 +68,9 @@ void IndexSampleGradInner(const Context& context,
 
 template <typename T, typename Context>
 void IndexSampleGradKernel(const Context& ctx,
-                           const DenseTensor& out_grad,
                            const DenseTensor& x,
                            const DenseTensor& index,
+                           const DenseTensor& out_grad,
                           DenseTensor* x_grad) {
   auto index_type = index.dtype();
   bool index_type_match =
diff --git a/paddle/phi/kernels/cpu/masked_select_grad_kernel.cc b/paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
index 7fe41e686af8c5..bbb08f06167769 100644
--- a/paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/masked_select_grad_kernel.cc
@@ -21,9 +21,9 @@ namespace phi {
 
 template <typename T, typename Context>
 void MaskedSelectGradKernel(const Context& dev_ctx,
-                            const DenseTensor& out_grad,
                             const DenseTensor& x,
                             const DenseTensor& mask,
+                            const DenseTensor& out_grad,
                             DenseTensor* x_grad) {
   auto* mask_data = mask.data();
   auto* input_data = out_grad.data();
diff --git a/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc b/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc
index e7d74759f516ac..5b859b6ec270e5 100644
--- a/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc
@@ -121,8 +121,8 @@ template <typename T, typename Context>
 void NllLossGradKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& labels,
-                       const DenseTensor& total_weight,
                        paddle::optional weight,
+                       const DenseTensor& total_weight,
                        const DenseTensor& d_out,
                        int64_t ignore_index,
                        const std::string& reduction,
diff --git a/paddle/phi/kernels/cpu/top_k_grad_kernel.cc b/paddle/phi/kernels/cpu/top_k_grad_kernel.cc
index 582ee1157cce8b..e44f85fb6c0fb0 100644
--- a/paddle/phi/kernels/cpu/top_k_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/top_k_grad_kernel.cc
@@ -51,17 +51,17 @@ static void FullTopKAssign(const Type& input_height,
 
 template <typename T, typename Context>
 void TopkGradKernel(const Context& dev_ctx,
-                    const DenseTensor& out_grad,
                     const DenseTensor& x,
                     const DenseTensor& indices,
-                    int k,
+                    const DenseTensor& out_grad,
+                    const Scalar& k_scalar,
                     int axis,
                     bool largest,
                     bool sorted,
                     DenseTensor* x_grad) {
   const auto& in_dims = x.dims();
   const auto& out_dims = indices.dims();
-
+  int k = k_scalar.to<int>();
   // axis < 0, get the real axis
   axis = (axis < 0) ? (in_dims.size() + axis) : axis;
diff --git a/paddle/phi/kernels/gpu/index_sample_grad_kernel.cu b/paddle/phi/kernels/gpu/index_sample_grad_kernel.cu
index 8b1ef964124d7d..669ae115439500 100644
--- a/paddle/phi/kernels/gpu/index_sample_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/index_sample_grad_kernel.cu
@@ -36,7 +36,7 @@ void LimitGridDim(const Context& ctx, dim3* grid_dim) {
 #define PREDEFINED_BLOCK_SIZE_X 512
 #define PREDEFINED_BLOCK_SIZE 1024
 #define MIN(a, b) ((a) < (b) ?
(a) : (b)) -}; +} // namespace template __global__ void IndexSampleGrad(const IndexT* index, @@ -67,9 +67,9 @@ __global__ void IndexSampleGrad(const IndexT* index, template void IndexSampleGradKernel(const Context& ctx, - const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& index, + const DenseTensor& out_grad, DenseTensor* x_grad) { const T* output_grad_data = out_grad.data(); T* input_grad_data = ctx.template Alloc(x_grad); diff --git a/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu b/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu index 5a4ce3a2679b94..171baab5513e44 100644 --- a/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/masked_select_grad_kernel.cu @@ -44,9 +44,9 @@ struct MaskedSelectGradFunctor { template void MaskedSelectGradKernel(const Context& dev_ctx, - const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& mask, + const DenseTensor& out_grad, DenseTensor* x_grad) { auto mask_size = mask.numel(); dev_ctx.template Alloc(x_grad); diff --git a/paddle/phi/kernels/gpu/nll_loss_grad_kernel.cu b/paddle/phi/kernels/gpu/nll_loss_grad_kernel.cu index 9a2d9c6e479aa4..43106ec1d863fd 100644 --- a/paddle/phi/kernels/gpu/nll_loss_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/nll_loss_grad_kernel.cu @@ -23,8 +23,8 @@ template void NllLossGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& labels, - const DenseTensor& total_weight, paddle::optional weight, + const DenseTensor& total_weight, const DenseTensor& dout, int64_t ignore_index, const std::string& reduction, diff --git a/paddle/phi/kernels/gpu/top_k_grad_kernel.cu b/paddle/phi/kernels/gpu/top_k_grad_kernel.cu index b0b45223489e93..32c5fc0006f4cf 100644 --- a/paddle/phi/kernels/gpu/top_k_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/top_k_grad_kernel.cu @@ -25,10 +25,10 @@ namespace ops = paddle::operators; template void TopkGradKernel(const Context& dev_ctx, - const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& indices, - int k, + const DenseTensor& out_grad, + const Scalar& k_scalar, int axis, bool largest, bool sorted, @@ -36,6 +36,8 @@ void TopkGradKernel(const Context& dev_ctx, const auto& in_dims = x.dims(); const auto& out_dims = indices.dims(); + int k = k_scalar.to(); + // get the real the axis and the k if (axis < 0) { axis += in_dims.size(); diff --git a/paddle/phi/kernels/histogram_kernel.h b/paddle/phi/kernels/histogram_kernel.h index b6b4593361dad8..0020f7b0435da2 100644 --- a/paddle/phi/kernels/histogram_kernel.h +++ b/paddle/phi/kernels/histogram_kernel.h @@ -18,11 +18,11 @@ namespace phi { template -void HistogramSelectKernel(const Context& dev_ctx, - const DenseTensor& input, - int64_t bins, - int min, - int max, - DenseTensor* out); +void HistogramKernel(const Context& dev_ctx, + const DenseTensor& input, + int64_t bins, + int min, + int max, + DenseTensor* output); } // namespace phi diff --git a/paddle/phi/kernels/index_sample_grad_kernel.h b/paddle/phi/kernels/index_sample_grad_kernel.h index 5c6e101f1b43df..2b66076ee0a2b3 100644 --- a/paddle/phi/kernels/index_sample_grad_kernel.h +++ b/paddle/phi/kernels/index_sample_grad_kernel.h @@ -20,9 +20,9 @@ namespace phi { template void IndexSampleGradKernel(const Context& ctx, - const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& index, + const DenseTensor& out_grad, DenseTensor* in_grad); } // namespace phi diff --git a/paddle/phi/kernels/masked_select_grad_kernel.h b/paddle/phi/kernels/masked_select_grad_kernel.h index 
f9db1fcd2acc7a..db7d105093d2ad 100644 --- a/paddle/phi/kernels/masked_select_grad_kernel.h +++ b/paddle/phi/kernels/masked_select_grad_kernel.h @@ -19,9 +19,9 @@ namespace phi { template void MaskedSelectGradKernel(const Context& dev_ctx, - const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& mask, + const DenseTensor& out_grad, DenseTensor* x_grad); } // namspace phi diff --git a/paddle/phi/kernels/nll_loss_grad_kernel.h b/paddle/phi/kernels/nll_loss_grad_kernel.h index 127dc2f961f101..c06f0726899ee2 100644 --- a/paddle/phi/kernels/nll_loss_grad_kernel.h +++ b/paddle/phi/kernels/nll_loss_grad_kernel.h @@ -22,8 +22,8 @@ template void NllLossGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& label, - const DenseTensor& total_weight, paddle::optional weight, + const DenseTensor& total_weight, const DenseTensor& d_out, int64_t ignore_index, const std::string& reduction, diff --git a/paddle/phi/kernels/top_k_grad_kernel.h b/paddle/phi/kernels/top_k_grad_kernel.h index f577b982c575dc..e4fde92dad8fde 100644 --- a/paddle/phi/kernels/top_k_grad_kernel.h +++ b/paddle/phi/kernels/top_k_grad_kernel.h @@ -14,16 +14,17 @@ #pragma once +#include "paddle/phi/common/scalar.h" #include "paddle/phi/core/dense_tensor.h" namespace phi { template void TopkGradKernel(const Context& dev_ctx, - const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& indices, - int k, + const DenseTensor& out_grad, + const Scalar& k, int axis, bool largest, bool sorted, diff --git a/paddle/phi/ops/compat/index_sample_sig.cc b/paddle/phi/ops/compat/index_sample_sig.cc index 0d2aed68a72a5e..3b7e3f063d6c10 100644 --- a/paddle/phi/ops/compat/index_sample_sig.cc +++ b/paddle/phi/ops/compat/index_sample_sig.cc @@ -19,7 +19,7 @@ namespace phi { KernelSignature IndexSampleGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("index_sample_grad", - {GradVarName("Out"), "X", "Index"}, + {"X", "Index", GradVarName("Out")}, {}, {GradVarName("X")}); } diff --git a/paddle/phi/ops/compat/masked_select_sig.cc b/paddle/phi/ops/compat/masked_select_sig.cc index 8083b123bcff53..ec0eb90315bc1b 100644 --- a/paddle/phi/ops/compat/masked_select_sig.cc +++ b/paddle/phi/ops/compat/masked_select_sig.cc @@ -24,7 +24,7 @@ KernelSignature MaskedSelectOpArgumentMapping( KernelSignature MaskedSelectGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("masked_select_grad", - {GradVarName("Y"), "X", "Mask"}, + {"X", "Mask", GradVarName("Y")}, {}, {GradVarName("X")}); } diff --git a/paddle/phi/ops/compat/nll_loss_sig.cc b/paddle/phi/ops/compat/nll_loss_sig.cc index f274d7f77c5c0a..87a060ce7a672f 100644 --- a/paddle/phi/ops/compat/nll_loss_sig.cc +++ b/paddle/phi/ops/compat/nll_loss_sig.cc @@ -29,7 +29,7 @@ KernelSignature NllLossGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature( "nll_loss_grad", - {"X", "Label", "Total_weight", "Weight", GradVarName("Out")}, + {"X", "Label", "Weight", "Total_weight", GradVarName("Out")}, {"ignore_index", "reduction"}, {GradVarName("X")}); } diff --git a/paddle/phi/ops/compat/top_k_sig.cc b/paddle/phi/ops/compat/top_k_sig.cc index 9bf922b3d1b589..8488a18e34ce10 100644 --- a/paddle/phi/ops/compat/top_k_sig.cc +++ b/paddle/phi/ops/compat/top_k_sig.cc @@ -29,7 +29,7 @@ KernelSignature TopkOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature TopkGradOpArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("top_k_grad", - {GradVarName("Out"), "X", 
"Indices"}, + {"X", "Indices", GradVarName("Out")}, {"k", "axis", "largest", "sorted"}, {GradVarName("X")}); } diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index d1ef9d6d8b4ea7..cb3781d5c299b8 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -12529,6 +12529,9 @@ def logical_and(x, y, out=None, name=None): res = paddle.logical_and(x, y) print(res) # [True False True False] """ + if in_dygraph_mode(): + return _C_ops.final_state_logical_and(x, y) + return _logical_op( op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True) @@ -12568,6 +12571,8 @@ def logical_or(x, y, out=None, name=None): res = paddle.logical_or(x, y) print(res) # [[ True True] [ True False]] """ + if in_dygraph_mode(): + return _C_ops.final_state_logical_or(x, y) return _logical_op( op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True) @@ -12607,6 +12612,9 @@ def logical_xor(x, y, out=None, name=None): res = paddle.logical_xor(x, y) print(res) # [[False, True], [ True, False]] """ + if in_dygraph_mode(): + return _C_ops.final_state_logical_xor(x, y) + return _logical_op( op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True) @@ -12639,7 +12647,8 @@ def logical_not(x, out=None, name=None): res = paddle.logical_not(x) print(res) # [False True False True] """ - + if in_dygraph_mode(): + return _C_ops.final_state_logical_not(x) return _logical_op( op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False) diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py index c9abac8fb7946d..dda10fdd84fff0 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py @@ -19,7 +19,7 @@ from paddle.fluid.op import Operator import paddle.fluid as fluid from op_test import OpTest, _set_use_system_allocator -from paddle.fluid.framework import grad_var_name +from paddle.fluid.framework import grad_var_name, _test_eager_guard import paddle.fluid as fluid from paddle.fluid import Program, program_guard import paddle @@ -46,32 +46,32 @@ def test_error(self): def error1d_dataformat(): x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') batch_norm1d = paddle.nn.BatchNorm1D(1, data_format='NCDHW') - batch_norm1d(fluid.dygraph.to_variable(x_data_4)) + batch_norm1d(paddle.to_tensor(x_data_4)) def error2d_dataformat(): x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32') batch_norm2d = paddle.nn.BatchNorm2D(1, data_format='NCDHW') - batch_norm2d(fluid.dygraph.to_variable(x_data_3)) + batch_norm2d(paddle.to_tensor(x_data_3)) def error3d_dataformat(): x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') batch_norm3d = paddle.nn.BatchNorm3D(1, data_format='NCL') - batch_norm3d(fluid.dygraph.to_variable(x_data_4)) + batch_norm3d(paddle.to_tensor(x_data_4)) def error1d(): x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') batch_norm1d = paddle.nn.BatchNorm1D(1) - batch_norm1d(fluid.dygraph.to_variable(x_data_4)) + batch_norm1d(paddle.to_tensor(x_data_4)) def error2d(): x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32') batch_norm2d = paddle.nn.BatchNorm2D(1) - batch_norm2d(fluid.dygraph.to_variable(x_data_3)) + batch_norm2d(paddle.to_tensor(x_data_3)) def error3d(): x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') batch_norm3d = paddle.nn.BatchNorm3D(1) - 
batch_norm3d(fluid.dygraph.to_variable(x_data_4)) + batch_norm3d(paddle.to_tensor(x_data_4)) with fluid.dygraph.guard(p): self.assertRaises(ValueError, error1d) @@ -94,13 +94,18 @@ def compute_v1(x, is_test, trainable_statistics): shape[1], is_test=is_test, trainable_statistics=trainable_statistics) - y = bn(fluid.dygraph.to_variable(x)) + y = bn(paddle.to_tensor(x)) return y.numpy() def compute_v2(x): with fluid.dygraph.guard(p): bn = paddle.nn.BatchNorm2D(shape[1]) - y = bn(fluid.dygraph.to_variable(x)) + y = bn(paddle.to_tensor(x)) + + with _test_eager_guard(): + bn = paddle.nn.BatchNorm2D(shape[1]) + eag_y = bn(paddle.to_tensor(x)) + assert np.allclose(eag_y.numpy(), y.numpy()) return y.numpy() def compute_v3(x, is_test, trainable_statistics): @@ -115,14 +120,14 @@ def compute_v3(x, is_test, trainable_statistics): initializer=fluid.initializer.Constant(0.0), trainable=False), trainable_statistics=trainable_statistics) - y = bn(fluid.dygraph.to_variable(x)) + y = bn(paddle.to_tensor(x)) return y.numpy() def compute_v4(x): with fluid.dygraph.guard(p): bn = paddle.nn.BatchNorm2D( shape[1], weight_attr=False, bias_attr=False) - y = bn(fluid.dygraph.to_variable(x)) + y = bn(paddle.to_tensor(x)) return y.numpy() x = np.random.randn(*shape).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py index a86758a9cb92b6..d50241e58dea3a 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py @@ -32,6 +32,7 @@ def setUp(self): 'X': np.random.random((32,84)).astype("float32"), 'Y': np.random.random((32,84)).astype("float32") """ + self.inputs = { 'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype), 'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) @@ -39,7 +40,7 @@ def setUp(self): self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} def check_eager(self): - return (self.use_mkldnn == False and self.axis == -1) + return (not hasattr(self, "attrs") or (self.attrs["axis"] != -1)) def test_check_output(self): self.check_output(check_eager=False) @@ -65,6 +66,7 @@ def init_dtype(self): class TestElementwiseDivOpBF16(OpTest): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.dtype = np.uint16 x = np.random.uniform(0.1, 1, [12, 13]).astype(np.float32) @@ -100,6 +102,7 @@ def test_check_grad_ingore_y(self): class TestElementwiseDivOp_scalar(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(np.float64), 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float64) @@ -110,6 +113,7 @@ def setUp(self): class TestElementwiseDivOp_Vector(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [100]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") @@ -120,6 +124,7 @@ def setUp(self): class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") @@ -135,6 +140,7 @@ def setUp(self): class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = 
paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") @@ -150,6 +156,7 @@ def setUp(self): class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") @@ -164,6 +171,7 @@ def setUp(self): class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [10, 12]).astype("float64") @@ -179,6 +187,7 @@ def setUp(self): class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype("float64") @@ -189,6 +198,7 @@ def setUp(self): class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype("float64") @@ -199,6 +209,7 @@ def setUp(self): class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype("float64"), @@ -209,6 +220,7 @@ def setUp(self): class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype("float64"), @@ -219,6 +231,7 @@ def setUp(self): class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.inputs = { 'X': np.random.uniform(0.1, 1, [10, 12]).astype("float64"), 'Y': np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype("float64"), @@ -232,6 +245,7 @@ def setUp(self): class TestElementwiseDivOp_INT(OpTest): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.dtype = np.int32 self.init_dtype() self.inputs = { @@ -304,6 +318,7 @@ def test_dygraph(self): class TestComplexElementwiseDivOp(OpTest): def setUp(self): self.op_type = "elementwise_div" + self.python_api = paddle.divide self.init_base_dtype() self.init_input_output() self.init_grad_input_output() @@ -334,7 +349,7 @@ def init_grad_input_output(self): self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y) def test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_grad_normal(self): self.check_grad( diff --git a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py index 62cd465a176d5c..416a60b8ba2001 100755 --- a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py @@ -24,6 +24,7 @@ class TestExpandAsOpRank1(OpTest): def setUp(self): self.op_type = "expand_as_v2" + 
self.python_api = paddle.expand_as x = np.random.rand(100).astype("float64") target_tensor = np.random.rand(2, 100).astype("float64") self.inputs = {'X': x} diff --git a/python/paddle/fluid/tests/unittests/test_histogram_op.py b/python/paddle/fluid/tests/unittests/test_histogram_op.py index 7da9dbd62e9f98..819029c5fcd9dc 100644 --- a/python/paddle/fluid/tests/unittests/test_histogram_op.py +++ b/python/paddle/fluid/tests/unittests/test_histogram_op.py @@ -21,6 +21,7 @@ import paddle.fluid.core as core from paddle.fluid import Program, program_guard from op_test import OpTest +from paddle.fluid.framework import _test_eager_guard class TestHistogramOpAPI(unittest.TestCase): @@ -57,6 +58,15 @@ def test_dygraph(self): (actual.numpy() == expected).all(), msg='histogram output is wrong, out =' + str(actual.numpy())) + with _test_eager_guard(): + inputs_np = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int64) + inputs = paddle.to_tensor(inputs_np) + actual = paddle.histogram(inputs, bins=5, min=1, max=5) + self.assertTrue( + (actual.numpy() == expected).all(), + msg='histogram output is wrong, out =' + + str(actual.numpy())) + class TestHistogramOpError(unittest.TestCase): """Test histogram op error.""" @@ -118,6 +128,7 @@ def setUp(self): self.op_type = "histogram" self.init_test_case() np_input = np.random.uniform(low=0.0, high=20.0, size=self.in_shape) + self.python_api = paddle.histogram self.inputs = {"X": np_input} self.init_attrs() Out, _ = np.histogram( @@ -134,7 +145,7 @@ def init_attrs(self): self.attrs = {"bins": self.bins, "min": self.min, "max": self.max} def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_index_sample_op.py b/python/paddle/fluid/tests/unittests/test_index_sample_op.py index e2ccb153f40631..4da03c9643fa97 100644 --- a/python/paddle/fluid/tests/unittests/test_index_sample_op.py +++ b/python/paddle/fluid/tests/unittests/test_index_sample_op.py @@ -40,10 +40,10 @@ def setUp(self): self.outputs = {'Out': out} def test_check_output(self): - self.check_output(check_eager=False) + self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], 'Out', check_eager=False) + self.check_grad(['X'], 'Out', check_eager=True) def config(self): """ diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py index 0d4d3b58e862ca..c861f912803f9c 100644 --- a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py @@ -16,6 +16,7 @@ import paddle.fluid as fluid import unittest import numpy as np +from paddle.fluid.framework import _test_eager_guard def run_static(x_np, dtype, op_str, use_gpu=False): @@ -46,6 +47,18 @@ def run_dygraph(x_np, op_str, use_gpu=True): return dygraph_result +def run_eager(x_np, op_str, use_gpu=True): + with paddle.fluid.dygraph.guard(): + with _test_eager_guard(): + place = paddle.CPUPlace() + if use_gpu and fluid.core.is_compiled_with_cuda(): + place = paddle.CUDAPlace(0) + + x = paddle.to_tensor(x_np) + dygraph_result = getattr(paddle.tensor, op_str)(x) + return dygraph_result + + def np_data_generator(low, high, np_shape, type, sv_list, op_str, *args, **kwargs): x_np = np.random.uniform(low, high, np_shape).astype(getattr(np, type)) @@ -107,8 +120,10 @@ def test(test_case, op_str, use_gpu=False): x_np, result_np = np_data_generator(**meta_data) static_result = run_static(x_np, 
meta_data['type'], op_str, use_gpu) dygraph_result = run_dygraph(x_np, op_str, use_gpu) + eager_result = run_eager(x_np, op_str, use_gpu) test_case.assertTrue((static_result == result_np).all()) test_case.assertTrue((dygraph_result.numpy() == result_np).all()) + test_case.assertTrue((eager_result.numpy() == result_np).all()) class TestCPUNormal(unittest.TestCase): @@ -158,4 +173,5 @@ def test_isfinite_bad_x(): if __name__ == '__main__': + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_lerp_op.py b/python/paddle/fluid/tests/unittests/test_lerp_op.py index 0f740444123cbe..10ab2610a26e43 100644 --- a/python/paddle/fluid/tests/unittests/test_lerp_op.py +++ b/python/paddle/fluid/tests/unittests/test_lerp_op.py @@ -27,6 +27,7 @@ class TestLerp(OpTest): def setUp(self): self.op_type = "lerp" + self.python_api = paddle.lerp self.init_dtype() self.init_shape() x = np.arange(1., 101.).astype(self.dtype).reshape(self.shape) @@ -42,10 +43,10 @@ def init_shape(self): self.shape = [100] def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X', 'Y'], 'Out') + self.check_grad(['X', 'Y'], 'Out', check_eager=True) class TestLerpWithDim2(TestLerp): diff --git a/python/paddle/fluid/tests/unittests/test_logical_op.py b/python/paddle/fluid/tests/unittests/test_logical_op.py index 174f3bc665ea16..91d339940d114c 100755 --- a/python/paddle/fluid/tests/unittests/test_logical_op.py +++ b/python/paddle/fluid/tests/unittests/test_logical_op.py @@ -20,6 +20,7 @@ import paddle import paddle.fluid as fluid from paddle.static import Program, program_guard +from paddle.fluid.framework import _test_eager_guard SUPPORTED_DTYPES = [ bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64 @@ -144,6 +145,22 @@ def run_dygraph(x_np, y_np, op_str, use_gpu=False, binary_op=True): return dygraph_result +def run_eager(x_np, y_np, op_str, use_gpu=False, binary_op=True): + place = paddle.CPUPlace() + if use_gpu and fluid.core.is_compiled_with_cuda(): + place = paddle.CUDAPlace(0) + paddle.disable_static(place) + with _test_eager_guard(): + op = getattr(paddle, op_str) + x = paddle.to_tensor(x_np, dtype=x_np.dtype) + if not binary_op: + dygraph_result = op(x) + else: + y = paddle.to_tensor(y_np, dtype=y_np.dtype) + dygraph_result = op(x, y) + return dygraph_result + + def np_data_generator(np_shape, dtype, *args, **kwargs): if dtype == bool: return np.random.choice(a=[True, False], size=np_shape).astype(bool) @@ -174,6 +191,7 @@ def test(unit_test, use_gpu=False, test_error=False): continue static_result = run_static(**meta_data) dygraph_result = run_dygraph(**meta_data) + eager_result = run_eager(**meta_data) if meta_data['binary_op']: np_result = np_op(meta_data['x_np'], meta_data['y_np']) else: @@ -181,6 +199,7 @@ def test(unit_test, use_gpu=False, test_error=False): unit_test.assertTrue((static_result == np_result).all()) unit_test.assertTrue((dygraph_result.numpy() == np_result).all( )) + unit_test.assertTrue((eager_result.numpy() == np_result).all()) def test_type_error(unit_test, use_gpu, type_str_map): @@ -259,4 +278,5 @@ def test_type_error(self): if __name__ == '__main__': + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_masked_select_op.py b/python/paddle/fluid/tests/unittests/test_masked_select_op.py index ed1a981d0306b3..764f4806ba4bad 100644 --- a/python/paddle/fluid/tests/unittests/test_masked_select_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_masked_select_op.py @@ -33,6 +33,7 @@ class TestMaskedSelectOp(OpTest): def setUp(self): self.init() self.op_type = "masked_select" + self.python_api = paddle.masked_select x = np.random.random(self.shape).astype("float64") mask = np.array(np.random.randint(2, size=self.shape, dtype=bool)) out = np_masked_select(x, mask) @@ -40,10 +41,10 @@ def setUp(self): self.outputs = {'Y': out} def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], 'Y') + self.check_grad(['X'], 'Y', check_eager=True) def init(self): self.shape = (50, 3) @@ -121,4 +122,5 @@ def test_mask_dtype(): if __name__ == '__main__': + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_nll_loss.py b/python/paddle/fluid/tests/unittests/test_nll_loss.py index a87d9052bd6d3a..0bc5e1cad9acd0 100644 --- a/python/paddle/fluid/tests/unittests/test_nll_loss.py +++ b/python/paddle/fluid/tests/unittests/test_nll_loss.py @@ -17,6 +17,7 @@ import numpy as np import unittest from op_test import OpTest +from paddle.fluid.framework import _test_eager_guard def nll_loss_1d(logs, targets, weight=None, reduction='mean', @@ -97,14 +98,21 @@ def test_NLLLoss_1D_mean(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() + with fluid.dygraph.guard(): + with _test_eager_guard(): + nll_loss = paddle.nn.loss.NLLLoss() + eager_res = nll_loss( + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) + eager_result = eager_res.numpy() + expected = nll_loss_1d(input_np, label_np)[0] self.assertTrue(np.allclose(static_result, expected)) self.assertTrue(np.allclose(static_result, dy_result)) self.assertTrue(np.allclose(dy_result, expected)) + self.assertTrue(np.allclose(eager_result, expected)) def test_NLLLoss_1D_sum(self): np.random.seed(200) @@ -132,14 +140,24 @@ def test_NLLLoss_1D_sum(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() + with _test_eager_guard(): + nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') + in_t = paddle.to_tensor(input_np) + label = paddle.to_tensor(label_np) + in_t.stop_gradient = False + eager_res = nll_loss(in_t, label) + eager_result = eager_res.numpy() + loss = eager_res.sum() + loss.backward() + expected = nll_loss_1d(input_np, label_np, reduction='sum')[0] self.assertTrue(np.allclose(static_result, expected)) self.assertTrue(np.allclose(static_result, dy_result)) self.assertTrue(np.allclose(dy_result, expected)) + self.assertTrue(np.allclose(eager_result, expected)) def test_NLLLoss_1D_with_weight_mean(self): np.random.seed(200) @@ -170,16 +188,26 @@ def test_NLLLoss_1D_with_weight_mean(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np)) + weight=paddle.to_tensor(weight_np)) dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() + + with _test_eager_guard(): + nll_loss = paddle.nn.loss.NLLLoss( + weight=paddle.to_tensor(weight_np)) + eager_res = nll_loss( + 
paddle.to_tensor(input_np), paddle.to_tensor(label_np)) + loss = eager_res.sum() + loss.backward() + eager_result = eager_res.numpy() + expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0] self.assertTrue(np.allclose(static_result, expected)) self.assertTrue(np.allclose(static_result, dy_result)) self.assertTrue(np.allclose(dy_result, expected)) + self.assertTrue(np.allclose(eager_result, expected)) def test_NLLLoss_1D_with_weight_sum(self): np.random.seed(200) @@ -210,10 +238,9 @@ def test_NLLLoss_1D_with_weight_sum(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='sum') + weight=paddle.to_tensor(weight_np), reduction='sum') dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() expected = nll_loss_1d( input_np, label_np, weight=weight_np, reduction='sum')[0] @@ -249,10 +276,9 @@ def test_NLLLoss_1D_with_weight_mean_cpu(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np)) + weight=paddle.to_tensor(weight_np)) dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0] @@ -287,10 +313,9 @@ def test_NLLLoss_1D_with_weight_no_reduce_cpu(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='none') + weight=paddle.to_tensor(weight_np), reduction='none') dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() expected = nll_loss_1d( input_np, label_np, weight=weight_np, reduction='none') @@ -326,8 +351,7 @@ def test_NLLLoss_2D_mean(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() expected = nll_loss_2d(input_np, label_np)[0] @@ -363,8 +387,7 @@ def test_NLLLoss_2D_sum(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() expected = nll_loss_2d(input_np, label_np, reduction='sum')[0] @@ -404,10 +427,9 @@ def test_NLLLoss_2D_with_weight_mean(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np)) + weight=paddle.to_tensor(weight_np)) dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0] @@ -445,10 +467,9 @@ def test_NLLLoss_2D_with_weight_mean_cpu(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np)) + weight=paddle.to_tensor(weight_np)) dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() 
expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0] @@ -487,10 +508,9 @@ def test_NLLLoss_2D_with_weight_sum(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='sum') + weight=paddle.to_tensor(weight_np), reduction='sum') dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() expected = nll_loss_2d( @@ -527,8 +547,7 @@ def test_NLLLoss_in_dims_not_2or4_mean(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() input_shape = input_np.shape @@ -572,10 +591,9 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_mean(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np)) + weight=paddle.to_tensor(weight_np)) dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() input_shape = input_np.shape @@ -620,10 +638,9 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_sum(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='sum') + weight=paddle.to_tensor(weight_np), reduction='sum') dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() input_shape = input_np.shape @@ -671,10 +688,9 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='none') + weight=paddle.to_tensor(weight_np), reduction='none') dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() input_shape = input_np.shape @@ -721,10 +737,9 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce_cpu(self): with fluid.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='none') + weight=paddle.to_tensor(weight_np), reduction='none') dy_res = nll_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np)) + paddle.to_tensor(input_np), paddle.to_tensor(label_np)) dy_result = dy_res.numpy() input_shape = input_np.shape @@ -749,6 +764,8 @@ def setUp(self): self.init_test_case() self.op_type = "nll_loss" self.with_weight = False + self.python_api = paddle.nn.functional.nll_loss + self.python_out_sig = ["Out"] np.random.seed(200) input_np = np.random.uniform(0.1, 0.8, self.input_shape).astype("float64") @@ -769,7 +786,7 @@ def setUp(self): self.attrs = {'reduction': 'mean', 'ignore_index': -100} def test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_output_with_weight(self): self.with_weight = True @@ -778,7 +795,7 @@ def test_check_output_with_weight(self): def test_check_grad(self): self.with_weight = True place = fluid.CPUPlace() - self.check_grad_with_place(place, ['X'], 'Out') + self.check_grad_with_place(place, ['X'], 'Out', check_eager=False) if 
fluid.core.is_compiled_with_cuda(): place = fluid.CUDAPlace(0) self.check_grad_with_place(place, ['X'], 'Out') @@ -1014,4 +1031,5 @@ def test_nll_loss_function_reduction_imperative_not_sum_mean_none(): if __name__ == "__main__": + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_top_k_op.py b/python/paddle/fluid/tests/unittests/test_top_k_op.py index 52d1fda0ae299d..83a940d064e76b 100644 --- a/python/paddle/fluid/tests/unittests/test_top_k_op.py +++ b/python/paddle/fluid/tests/unittests/test_top_k_op.py @@ -18,6 +18,7 @@ import numpy as np from op_test import OpTest import paddle.fluid.core as core +import paddle class TestTopkOp(OpTest): @@ -61,4 +62,5 @@ def test_check_grad(self): if __name__ == "__main__": + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py b/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py index 4be53304733cbf..f1c4ca18da72b9 100644 --- a/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py @@ -19,6 +19,7 @@ from op_test import OpTest import paddle import paddle.fluid.core as core +from paddle.fluid.framework import _test_eager_guard def numpy_topk(x, k=1, axis=-1, largest=True): @@ -45,6 +46,7 @@ def init_args(self): def setUp(self): self.op_type = "top_k_v2" + self.python_api = paddle.topk self.dtype = np.float64 self.input_data = np.random.rand(10, 20) self.init_args() @@ -55,12 +57,10 @@ def setUp(self): self.outputs = {'Out': output, 'Indices': indices} def test_check_output(self): - paddle.enable_static() - self.check_output() + self.check_output(check_eager=False) def test_check_grad(self): - paddle.enable_static() - self.check_grad(set(['X']), 'Out') + self.check_grad(set(['X']), 'Out', check_eager=False) class TestTopkOp1(TestTopkOp): @@ -85,6 +85,7 @@ def init_args(self): def setUp(self): self.op_type = "top_k_v2" + self.python_api = paddle.topk self.dtype = np.float64 self.input_data = np.random.rand(16, 100) self.init_args() @@ -103,6 +104,7 @@ def init_args(self): def setUp(self): self.op_type = "top_k_v2" + self.python_api = paddle.topk self.dtype = np.float64 self.input_data = np.random.rand(10, 10, 5) self.init_args() @@ -121,6 +123,7 @@ def init_args(self): def setUp(self): self.op_type = "top_k_v2" + self.python_api = paddle.topk self.dtype = np.float64 self.input_data = np.random.rand(10, 10, 5) self.init_args() @@ -139,6 +142,7 @@ def init_args(self): def setUp(self): self.op_type = "top_k_v2" + self.python_api = paddle.topk self.dtype = np.float64 self.input_data = np.random.rand(80, 16384) self.init_args() @@ -156,48 +160,64 @@ def setUp(self): self.large_input_data = np.random.rand(2, 1030) def run_dygraph(self, place): - paddle.disable_static(place) - input_tensor = paddle.to_tensor(self.input_data) - large_input_tensor = paddle.to_tensor(self.large_input_data) - # test case for basic test case 1 - paddle_result = paddle.topk(input_tensor, k=2) - numpy_result = numpy_topk(self.input_data, k=2) - self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1])) - # test case for basic test case 2 with axis - paddle_result = paddle.topk(input_tensor, k=2, axis=1) - numpy_result = numpy_topk(self.input_data, k=2, axis=1) - self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1])) - # test case for basic test case 
3 with tensor K - k_tensor = paddle.to_tensor(np.array([2])) - paddle_result = paddle.topk(input_tensor, k=k_tensor, axis=1) - numpy_result = numpy_topk(self.input_data, k=2, axis=1) - self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1])) - # test case for basic test case 4 with tensor largest - k_tensor = paddle.to_tensor(np.array([2])) - paddle_result = paddle.topk(input_tensor, k=2, axis=1, largest=False) - numpy_result = numpy_topk(self.input_data, k=2, axis=1, largest=False) - self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1])) - # test case for basic test case 5 with axis -1 - k_tensor = paddle.to_tensor(np.array([2])) - paddle_result = paddle.topk(input_tensor, k=2, axis=-1, largest=False) - numpy_result = numpy_topk(self.input_data, k=2, axis=-1, largest=False) - self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1])) - # test case for basic test case 6 for the partial sort - paddle_result = paddle.topk(large_input_tensor, k=1, axis=-1) - numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1) - self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1])) - # test case for basic test case 7 for the unsorted - paddle_result = paddle.topk(input_tensor, k=2, axis=1, sorted=False) - sort_paddle = numpy_topk( - np.array(paddle_result[0].numpy()), axis=1, k=2) - numpy_result = numpy_topk(self.input_data, k=2, axis=1) - self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0])) + with paddle.fluid.dygraph.guard(place): + input_tensor = paddle.to_tensor(self.input_data) + large_input_tensor = paddle.to_tensor(self.large_input_data) + # test case for basic test case 1 + paddle_result = paddle.topk(input_tensor, k=2) + numpy_result = numpy_topk(self.input_data, k=2) + self.assertTrue( + np.allclose(paddle_result[0].numpy(), numpy_result[0])) + self.assertTrue( + np.allclose(paddle_result[1].numpy(), numpy_result[1])) + # test case for basic test case 2 with axis + paddle_result = paddle.topk(input_tensor, k=2, axis=1) + numpy_result = numpy_topk(self.input_data, k=2, axis=1) + self.assertTrue( + np.allclose(paddle_result[0].numpy(), numpy_result[0])) + self.assertTrue( + np.allclose(paddle_result[1].numpy(), numpy_result[1])) + # test case for basic test case 3 with tensor K + k_tensor = paddle.to_tensor(np.array([2])) + paddle_result = paddle.topk(input_tensor, k=k_tensor, axis=1) + numpy_result = numpy_topk(self.input_data, k=2, axis=1) + self.assertTrue( + np.allclose(paddle_result[0].numpy(), numpy_result[0])) + self.assertTrue( + np.allclose(paddle_result[1].numpy(), numpy_result[1])) + # test case for basic test case 4 with tensor largest + k_tensor = paddle.to_tensor(np.array([2])) + paddle_result = paddle.topk( + input_tensor, k=2, axis=1, largest=False) + numpy_result = numpy_topk( + self.input_data, k=2, axis=1, largest=False) + self.assertTrue( + np.allclose(paddle_result[0].numpy(), numpy_result[0])) + self.assertTrue( + np.allclose(paddle_result[1].numpy(), numpy_result[1])) + # test case for basic test case 5 with axis -1 + k_tensor = paddle.to_tensor(np.array([2])) + paddle_result = paddle.topk( + input_tensor, k=2, axis=-1, largest=False) + numpy_result = numpy_topk( + self.input_data, k=2, axis=-1, 
largest=False) + self.assertTrue( + np.allclose(paddle_result[0].numpy(), numpy_result[0])) + self.assertTrue( + np.allclose(paddle_result[1].numpy(), numpy_result[1])) + # test case for basic test case 6 for the partial sort + paddle_result = paddle.topk(large_input_tensor, k=1, axis=-1) + numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1) + self.assertTrue( + np.allclose(paddle_result[0].numpy(), numpy_result[0])) + self.assertTrue( + np.allclose(paddle_result[1].numpy(), numpy_result[1])) + # test case for basic test case 7 for the unsorted + paddle_result = paddle.topk(input_tensor, k=2, axis=1, sorted=False) + sort_paddle = numpy_topk( + np.array(paddle_result[0].numpy()), axis=1, k=2) + numpy_result = numpy_topk(self.input_data, k=2, axis=1) + self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0])) def run_static(self, place): paddle.enable_static() @@ -264,14 +284,15 @@ def test_cases(self): self.run_static(place) def test_errors(self): - paddle.disable_static() - x = paddle.to_tensor([1, 2, 3]) - with self.assertRaises(BaseException): - paddle.topk(x, k=-1) + with paddle.fluid.dygraph.guard(): + x = paddle.to_tensor([1, 2, 3]) + with self.assertRaises(BaseException): + paddle.topk(x, k=-1) - with self.assertRaises(BaseException): - paddle.topk(x, k=0) + with self.assertRaises(BaseException): + paddle.topk(x, k=0) if __name__ == "__main__": + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py index 6f64322e975454..163e246b715606 100644 --- a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py +++ b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py @@ -74,6 +74,7 @@ def set_attr(self): def setUp(self): self.op_type = "viterbi_decode" + self.python_api = paddle.text.viterbi_decode self.set_attr() bz, length, ntags = self.bz, self.len, self.ntags self.input = np.random.randn(bz, length, ntags).astype(self.dtype) @@ -90,7 +91,7 @@ def setUp(self): self.outputs = {'Scores': scores, 'Path': path} def test_output(self): - self.check_output() + self.check_output(check_eager=True) class TestViterbiAPI(unittest.TestCase): @@ -132,3 +133,8 @@ def check_static_result(self, place): def test_static_net(self): for place in self.places: self.check_static_result(place) + + +if __name__ == "__main__": + paddle.enable_static() + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_yolo_box_op.py b/python/paddle/fluid/tests/unittests/test_yolo_box_op.py index 05a4dfe3c06b61..19dcb49cd957c0 100644 --- a/python/paddle/fluid/tests/unittests/test_yolo_box_op.py +++ b/python/paddle/fluid/tests/unittests/test_yolo_box_op.py @@ -31,7 +31,7 @@ def YoloBox(x, img_size, attrs): an_num = int((len(anchors) // 2)) class_num = attrs['class_num'] conf_thresh = attrs['conf_thresh'] - downsample = attrs['downsample'] + downsample = attrs['downsample_ratio'] clip_bbox = attrs['clip_bbox'] scale_x_y = attrs['scale_x_y'] iou_aware = attrs['iou_aware'] @@ -92,13 +92,14 @@ class TestYoloBoxOp(OpTest): def setUp(self): self.initTestCase() self.op_type = 'yolo_box' + self.python_api = paddle.vision.ops.yolo_box x = np.random.random(self.x_shape).astype('float32') img_size = np.random.randint(10, 20, self.imgsize_shape).astype('int32') self.attrs = { 'anchors': self.anchors, 'class_num': self.class_num, 'conf_thresh': self.conf_thresh, - 'downsample': self.downsample, + 'downsample_ratio': self.downsample, 'clip_bbox': self.clip_bbox, 
'scale_x_y': self.scale_x_y, 'iou_aware': self.iou_aware, diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index e757fbf53487e0..d988d1653ca69d 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -28,7 +28,7 @@ from ...tensor import sum from ...tensor import sqrt from ...fluid.data_feeder import check_variable_and_dtype, check_dtype -from ...fluid.framework import _varbase_creator +from ...fluid.framework import _varbase_creator, _in_legacy_dygraph, in_dygraph_mode from ...fluid import dygraph_utils from ...fluid import layers @@ -1616,7 +1616,7 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None): if epsilon > 1. or epsilon < 0.: raise ValueError("The value of epsilon must be between 0 and 1.") - if in_dynamic_mode(): + if paddle.in_dynamic_mode(): return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon)) check_variable_and_dtype(label, 'label', ['float32', 'float64'], diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index e7763853bf7c2c..660e6d35871085 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -37,7 +37,7 @@ from paddle import _C_ops from paddle import in_dynamic_mode from paddle.framework import core -from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode +from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _non_static_mode __all__ = [] @@ -784,11 +784,12 @@ def nll_loss(input, input_dims)) n = input_shape[0] c = input_shape[1] - if in_dynamic_mode(): + if _non_static_mode(): if input_dims != 2 and input_dims != 4: input, _ = _C_ops.reshape2(input, None, 'shape', [n, c, 1, -1]) label, _ = _C_ops.reshape2(label, None, 'shape', [n, 1, -1]) out_shape = [n] + input_shape[2:] + out, total_weight = _C_ops.nll_loss(input, label, weight, 'ignore_index', ignore_index, 'reduction', reduction) diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py index c039754af4d123..536c611d85f28f 100644 --- a/python/paddle/nn/functional/norm.py +++ b/python/paddle/nn/functional/norm.py @@ -181,7 +181,7 @@ def batch_norm(x, trainable_statistics = not use_global_stats if in_dynamic_mode(): - # for dygraph need tuple + attrs = ("momentum", momentum, "epsilon", epsilon, "is_test", not training, "data_layout", data_format, "use_mkldnn", False, "fuse_with_relu", False, "use_global_stats", use_global_stats, diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 7901379d9c7934..4b8395e1c43c89 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -1397,7 +1397,10 @@ def histogram(input, bins=100, min=0, max=0, name=None): result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ - if paddle.in_dynamic_mode(): + if in_dygraph_mode(): + return _C_ops.final_state_histogram(input, bins, min, max) + + if _in_legacy_dygraph(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 03b64e2b828df5..3c02c11b933c10 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -536,6 +536,8 @@ def bitwise_and(x, y, out=None, name=None): res = paddle.bitwise_and(x, y) print(res) # [0, 2, 1] """ + if in_dygraph_mode() and out == None: + return _C_ops.final_state_bitwise_and(x, y) return _bitwise_op( op_name="bitwise_and", 
x=x, y=y, name=name, out=out, binary_op=True) @@ -562,6 +564,9 @@ def bitwise_or(x, y, out=None, name=None): res = paddle.bitwise_or(x, y) print(res) # [-1, -1, -3] """ + if in_dygraph_mode() and out == None: + return _C_ops.final_state_bitwise_or(x, y) + return _bitwise_op( op_name="bitwise_or", x=x, y=y, name=name, out=out, binary_op=True) @@ -588,6 +593,8 @@ def bitwise_xor(x, y, out=None, name=None): res = paddle.bitwise_xor(x, y) print(res) # [-1, -3, -4] """ + if in_dygraph_mode() and out == None: + return _C_ops.final_state_bitwise_xor(x, y) return _bitwise_op( op_name="bitwise_xor", x=x, y=y, name=name, out=out, binary_op=True) @@ -612,6 +619,8 @@ def bitwise_not(x, out=None, name=None): res = paddle.bitwise_not(x) print(res) # [4, 0, -2] """ + if in_dygraph_mode() and out == None: + return _C_ops.final_state_bitwise_not(x) return _bitwise_op( op_name="bitwise_not", x=x, y=None, name=name, out=out, binary_op=False) diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 7921c7798be3a8..68d6aca35ad655 100755 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -17,7 +17,7 @@ from ..static import Variable, device_guard from ..framework import core -from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _in_eager_without_dygraph_check +from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _in_eager_without_dygraph_check, _non_static_mode from ..fluid.layer_helper import LayerHelper from ..framework import OpProtoHolder, convert_np_dtype_to_dtype_, dygraph_only from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype @@ -1845,7 +1845,7 @@ def expand_as(x, y, name=None): np_out = out.numpy() # [[1, 2, 3], [1, 2, 3]] """ - if paddle.in_dynamic_mode(): + if _non_static_mode(): return _C_ops.expand_as_v2(x, 'target_shape', y.shape) check_variable_and_dtype( diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 7ee684f5a2f077..48fa363f77c356 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -2681,7 +2681,9 @@ def isfinite(x, name=None): out = paddle.tensor.isfinite(x) print(out) # [False True True False True False False] """ - if paddle.in_dynamic_mode(): + if in_dygraph_mode(): + return _C_ops.final_state_isfinite( x ) + if _in_legacy_dygraph(): return _C_ops.isfinite_v2(x) helper = LayerHelper("isfinite_v2", **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite') @@ -2709,7 +2711,9 @@ def isinf(x, name=None): out = paddle.tensor.isinf(x) print(out) # [ True False False True False False False] """ - if paddle.in_dynamic_mode(): + if in_dygraph_mode(): + return _C_ops.final_state_isinf( x ) + if _in_legacy_dygraph(): return _C_ops.isinf_v2(x) helper = LayerHelper("isinf_v2", **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf') @@ -2737,7 +2741,10 @@ def isnan(x, name=None): out = paddle.tensor.isnan(x) print(out) # [False False False False False True True] """ - if paddle.in_dynamic_mode(): + if in_dygraph_mode(): + return _C_ops.final_state_isnan( x ) + + if _in_legacy_dygraph(): return _C_ops.isnan_v2(x) helper = LayerHelper("isnan_v2", **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan') @@ -3387,8 +3394,13 @@ def lerp(x, y, weight, name=None): # out: [5.5., 6., 6.5, 7.] 
""" - if paddle.in_dynamic_mode(): + if in_dygraph_mode(): check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp') + if isinstance(weight, float): + weight = paddle.to_tensor(weight, dtype=x.dtype) + + return _C_ops.final_state_lerp( x, y, weight) + if _in_legacy_dygraph(): if isinstance(weight, float): weight = paddle.to_tensor(weight, dtype=x.dtype) return _C_ops.lerp(x, y, weight) diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py index ef10135fb99c10..c41c76f1b379b9 100644 --- a/python/paddle/tensor/search.py +++ b/python/paddle/tensor/search.py @@ -18,7 +18,7 @@ from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..fluid import layers from ..framework import core -from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode +from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _non_static_mode from paddle.common_ops_import import convert_np_dtype_to_dtype_ from paddle.common_ops_import import Variable from paddle.common_ops_import import VarDesc @@ -774,7 +774,10 @@ def masked_select(x, mask, name=None): #[1.0 5.0 6.0 9.0] """ - if paddle.in_dynamic_mode(): + if in_dygraph_mode(): + return _C_ops.final_state_masked_select(x, mask) + + if _in_legacy_dygraph(): return _C_ops.masked_select(x, mask) helper = LayerHelper("masked_select", **locals()) @@ -844,8 +847,8 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None): # [[1 1 0 0]] """ - if paddle.in_dynamic_mode(): - k = k.numpy().item(0) if isinstance(k, Variable) else k + + if _non_static_mode(): if axis is None: out, indices = _C_ops.top_k_v2(x, 'k', int(k), 'largest', largest, 'sorted', diff --git a/python/paddle/text/viterbi_decode.py b/python/paddle/text/viterbi_decode.py index dbf16bfbc6a970..ce5667b134a03e 100644 --- a/python/paddle/text/viterbi_decode.py +++ b/python/paddle/text/viterbi_decode.py @@ -13,7 +13,7 @@ # limitations under the License. 
from ..nn import Layer -from ..fluid.framework import core, _non_static_mode +from ..fluid.framework import core, _non_static_mode, in_dygraph_mode from ..fluid.layer_helper import LayerHelper from ..fluid.data_feeder import check_variable_and_dtype, check_type from paddle import _C_ops @@ -58,6 +58,10 @@ def viterbi_decode(potentials, transition = paddle.rand((num_tags, num_tags), dtype='float32') scores, path = paddle.text.viterbi_decode(emission, transition, length, False) # scores: [3.37089300, 1.56825531], path: [[1, 0, 0], [1, 1, 0]] """ + if in_dygraph_mode(): + return _C_ops.final_state_viterbi_decode(potentials, transition_params, + lengths, include_bos_eos_tag) + if _non_static_mode(): return _C_ops.viterbi_decode(potentials, transition_params, lengths, 'include_bos_eos_tag', include_bos_eos_tag) diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml index 5c4adcbfecbf29..5499c81c7ecd97 100644 --- a/python/paddle/utils/code_gen/api.yaml +++ b/python/paddle/utils/code_gen/api.yaml @@ -547,6 +547,15 @@ func : hard_sigmoid backward : hard_sigmoid_grad +# histogram +- api : histogram + args : (Tensor x, int64_t bins, int min, int max) + output : Tensor + infer_meta : + func : HistogramInferMeta + kernel : + func : histogram + - api : huber_loss args : (Tensor input, Tensor label, float delta) output : Tensor(out), Tensor(residual) diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py index 00bd6ed38a3adf..b510b7c8bdfe8f 100644 --- a/python/paddle/vision/ops.py +++ b/python/paddle/vision/ops.py @@ -19,7 +19,7 @@ from ..fluid.layers import nn, utils from ..nn import Layer, Conv2D, Sequential, ReLU, BatchNorm2D from ..fluid.initializer import Normal -from ..fluid.framework import _non_static_mode +from ..fluid.framework import _non_static_mode, in_dygraph_mode from paddle.common_ops_import import * from paddle import _C_ops @@ -377,6 +377,12 @@ def yolo_box(x, clip_bbox=True, scale_x_y=1.) 
""" + if in_dygraph_mode(): + boxes, scores = _C_ops.final_state_yolo_box( + x, img_size, anchors, class_num, conf_thresh, downsample_ratio, + clip_bbox, scale_x_y, iou_aware, iou_aware_factor) + return boxes, scores + if _non_static_mode(): boxes, scores = _C_ops.yolo_box( x, img_size, 'anchors', anchors, 'class_num', class_num, From 9b6a02d4563cef827ebf03a3f010f214dcb0931d Mon Sep 17 00:00:00 2001 From: Chen Weihang Date: Fri, 1 Apr 2022 10:04:24 +0800 Subject: [PATCH 2/6] [Phi] Add shape and strided_slice yaml & Adapt eager mode (#41131) * add several yaml * polish strided slice kernel & add yaml * reorder yaml * add several yaml * revert yaml config change * resolve conflict * Update test_strided_slice_op.py --- paddle/fluid/operators/strided_slice_op.cc | 2 +- paddle/phi/infermeta/unary.cc | 31 +- paddle/phi/infermeta/unary.h | 12 +- .../kernels/cpu/strided_slice_grad_kernel.cc | 4 +- .../phi/kernels/cpu/strided_slice_kernel.cc | 4 +- .../kernels/gpu/strided_slice_grad_kernel.cu | 4 +- .../phi/kernels/gpu/strided_slice_kernel.cu | 4 +- .../impl/strided_slice_grad_kernel_impl.h | 20 +- .../kernels/impl/strided_slice_kernel_impl.h | 18 +- .../phi/kernels/strided_slice_grad_kernel.cc | 69 +++ .../phi/kernels/strided_slice_grad_kernel.h | 14 +- paddle/phi/kernels/strided_slice_kernel.cc | 60 +++ paddle/phi/kernels/strided_slice_kernel.h | 13 +- paddle/phi/ops/compat/strided_slice_sig.cc | 424 +++--------------- python/paddle/fluid/layers/nn.py | 10 +- .../fluid/tests/unittests/test_shape_op.py | 4 +- .../tests/unittests/test_strided_slice_op.py | 7 +- python/paddle/utils/code_gen/api.yaml | 17 + python/paddle/utils/code_gen/backward.yaml | 10 + 19 files changed, 317 insertions(+), 410 deletions(-) create mode 100644 paddle/phi/kernels/strided_slice_grad_kernel.cc create mode 100644 paddle/phi/kernels/strided_slice_kernel.cc diff --git a/paddle/fluid/operators/strided_slice_op.cc b/paddle/fluid/operators/strided_slice_op.cc index 0ff7d654fc29d1..6f092bbef067ed 100644 --- a/paddle/fluid/operators/strided_slice_op.cc +++ b/paddle/fluid/operators/strided_slice_op.cc @@ -228,7 +228,7 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER(StridedSliceOpGradNoNeedBufferVarsInferer, namespace ops = paddle::operators; DECLARE_INFER_SHAPE_FUNCTOR(strided_slice, StridedSliceInferShape, - PD_INFER_META(phi::StridedSliceInferMeta)); + PD_INFER_META(phi::StridedSliceRawInferMeta)); REGISTER_OPERATOR(strided_slice, ops::StridedSliceOp, ops::StridedSliceOpMaker, ops::StridedSliceOpGradMaker, diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index d763b23ef5c358..6bf7a36b065345 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -1922,15 +1922,15 @@ void SqueezeInferMeta(const MetaTensor& x, out->set_dtype(x.dtype()); } -void StridedSliceInferMeta(const MetaTensor& x, - const std::vector& axes, - const IntArray& starts, - const IntArray& ends, - const IntArray& strides, - const std::vector& infer_flags, - const std::vector& decrease_axis, - MetaTensor* out, - MetaConfig config) { +void StridedSliceRawInferMeta(const MetaTensor& x, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + const std::vector& infer_flags, + const std::vector& decrease_axis, + MetaTensor* out, + MetaConfig config) { auto in_dims = x.dims(); PADDLE_ENFORCE_LT( in_dims.size(), @@ -2052,6 +2052,19 @@ void StridedSliceInferMeta(const MetaTensor& x, out->set_dtype(x.dtype()); } +void StridedSliceInferMeta(const MetaTensor& x, + const 
std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + MetaTensor* out, + MetaConfig config) { + std::vector infer_flags(axes.size(), 1); + std::vector decrease_axis; + StridedSliceRawInferMeta( + x, axes, starts, ends, strides, infer_flags, decrease_axis, out, config); +} + /* Why not use SumRawInferMeta directly? Because we need make InferMetaFunction's args follow the design of api.yaml */ diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h index 7ab0f3df2af323..54f70d8d554054 100644 --- a/paddle/phi/infermeta/unary.h +++ b/paddle/phi/infermeta/unary.h @@ -284,13 +284,21 @@ void SqueezeInferMeta(const MetaTensor& x, MetaTensor* xshape, MetaTensor* out); +void StridedSliceRawInferMeta(const MetaTensor& x, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + const std::vector& infer_flags, + const std::vector& decrease_axis, + MetaTensor* out, + MetaConfig config = MetaConfig()); + void StridedSliceInferMeta(const MetaTensor& x, const std::vector& axes, const IntArray& starts, const IntArray& ends, const IntArray& strides, - const std::vector& infer_flags, - const std::vector& decrease_axis, MetaTensor* out, MetaConfig config = MetaConfig()); diff --git a/paddle/phi/kernels/cpu/strided_slice_grad_kernel.cc b/paddle/phi/kernels/cpu/strided_slice_grad_kernel.cc index cdc5534d63c085..e6c812cf6bd5aa 100644 --- a/paddle/phi/kernels/cpu/strided_slice_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/strided_slice_grad_kernel.cc @@ -19,10 +19,10 @@ #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h" -PD_REGISTER_KERNEL(strided_slice_grad, +PD_REGISTER_KERNEL(strided_slice_raw_grad, CPU, ALL_LAYOUT, - phi::StridedSliceGradKernel, + phi::StridedSliceRawGradKernel, bool, int, int64_t, diff --git a/paddle/phi/kernels/cpu/strided_slice_kernel.cc b/paddle/phi/kernels/cpu/strided_slice_kernel.cc index f34a3301fcb42b..d0aa7b2f4cee62 100644 --- a/paddle/phi/kernels/cpu/strided_slice_kernel.cc +++ b/paddle/phi/kernels/cpu/strided_slice_kernel.cc @@ -19,10 +19,10 @@ #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/impl/strided_slice_kernel_impl.h" -PD_REGISTER_KERNEL(strided_slice, +PD_REGISTER_KERNEL(strided_slice_raw, CPU, ALL_LAYOUT, - phi::StridedSliceKernel, + phi::StridedSliceRawKernel, bool, int, int64_t, diff --git a/paddle/phi/kernels/gpu/strided_slice_grad_kernel.cu b/paddle/phi/kernels/gpu/strided_slice_grad_kernel.cu index 5f31d488533a6e..90d9f1d9865773 100644 --- a/paddle/phi/kernels/gpu/strided_slice_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/strided_slice_grad_kernel.cu @@ -19,10 +19,10 @@ #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h" -PD_REGISTER_KERNEL(strided_slice_grad, +PD_REGISTER_KERNEL(strided_slice_raw_grad, GPU, ALL_LAYOUT, - phi::StridedSliceGradKernel, + phi::StridedSliceRawGradKernel, bool, int, int64_t, diff --git a/paddle/phi/kernels/gpu/strided_slice_kernel.cu b/paddle/phi/kernels/gpu/strided_slice_kernel.cu index ff10718edb323e..716150ff47dea9 100644 --- a/paddle/phi/kernels/gpu/strided_slice_kernel.cu +++ b/paddle/phi/kernels/gpu/strided_slice_kernel.cu @@ -19,10 +19,10 @@ #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/impl/strided_slice_kernel_impl.h" -PD_REGISTER_KERNEL(strided_slice, +PD_REGISTER_KERNEL(strided_slice_raw, GPU, ALL_LAYOUT, - phi::StridedSliceKernel, + 
phi::StridedSliceRawKernel, bool, int, int64_t, diff --git a/paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h b/paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h index f0fddce6b55472..95780682c98dd0 100644 --- a/paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/strided_slice_grad_kernel_impl.h @@ -20,16 +20,16 @@ namespace phi { template -void StridedSliceGradKernel(const Context& dev_ctx, - const DenseTensor& x, - const DenseTensor& out_grad, - const std::vector& axes, - const IntArray& starts, - const IntArray& ends, - const IntArray& strides, - const std::vector& infer_flags, - const std::vector& decrease_axis, - DenseTensor* x_grad) { +void StridedSliceRawGradKernel(const Context& dev_ctx, + const DenseTensor& x, + const DenseTensor& out_grad, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + const std::vector& infer_flags, + const std::vector& decrease_axis, + DenseTensor* x_grad) { int rank = x.dims().size(); #define SLICE_CASE(Rank) \ case Rank: \ diff --git a/paddle/phi/kernels/impl/strided_slice_kernel_impl.h b/paddle/phi/kernels/impl/strided_slice_kernel_impl.h index 2df937524ef201..81e6d5056267ac 100644 --- a/paddle/phi/kernels/impl/strided_slice_kernel_impl.h +++ b/paddle/phi/kernels/impl/strided_slice_kernel_impl.h @@ -20,15 +20,15 @@ namespace phi { template -void StridedSliceKernel(const Context& dev_ctx, - const DenseTensor& x, - const std::vector& axes, - const IntArray& starts, - const IntArray& ends, - const IntArray& strides, - const std::vector& infer_flags, - const std::vector& decrease_axis, - DenseTensor* out) { +void StridedSliceRawKernel(const Context& dev_ctx, + const DenseTensor& x, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + const std::vector& infer_flags, + const std::vector& decrease_axis, + DenseTensor* out) { int rank = x.dims().size(); #define SLICE_CASE(Rank) \ case Rank: \ diff --git a/paddle/phi/kernels/strided_slice_grad_kernel.cc b/paddle/phi/kernels/strided_slice_grad_kernel.cc new file mode 100644 index 00000000000000..38dd360ea66c21 --- /dev/null +++ b/paddle/phi/kernels/strided_slice_grad_kernel.cc @@ -0,0 +1,69 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
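+// This new translation unit defines the public StridedSliceGradKernel: a thin
+// wrapper that fills default infer_flags/decrease_axis, forwards to
+// StridedSliceRawGradKernel, and registers the kernel for CPU and GPU.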
+ +#include "paddle/phi/kernels/strided_slice_grad_kernel.h" + +#include "paddle/phi/core/kernel_registry.h" + +namespace phi { + +template +void StridedSliceGradKernel(const Context& dev_ctx, + const DenseTensor& x, + const DenseTensor& out_grad, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + DenseTensor* x_grad) { + std::vector infer_flags(axes.size(), 1); + std::vector decrease_axis; + StridedSliceRawGradKernel(dev_ctx, + x, + out_grad, + axes, + starts, + ends, + strides, + infer_flags, + decrease_axis, + x_grad); +} + +} // namespace phi + +PD_REGISTER_KERNEL(strided_slice_grad, + CPU, + ALL_LAYOUT, + phi::StridedSliceGradKernel, + bool, + int, + int64_t, + float, + double, + phi::dtype::complex, + phi::dtype::complex) {} +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) +PD_REGISTER_KERNEL(strided_slice_grad, + GPU, + ALL_LAYOUT, + phi::StridedSliceGradKernel, + bool, + int, + int64_t, + float, + double, + phi::dtype::complex, + phi::dtype::complex) {} +#endif diff --git a/paddle/phi/kernels/strided_slice_grad_kernel.h b/paddle/phi/kernels/strided_slice_grad_kernel.h index 07fba9d27bfe90..21d01310b662f4 100644 --- a/paddle/phi/kernels/strided_slice_grad_kernel.h +++ b/paddle/phi/kernels/strided_slice_grad_kernel.h @@ -19,6 +19,18 @@ namespace phi { +template +void StridedSliceRawGradKernel(const Context& dev_ctx, + const DenseTensor& x, + const DenseTensor& out_grad, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + const std::vector& infer_flags, + const std::vector& decrease_axis, + DenseTensor* x_grad); + template void StridedSliceGradKernel(const Context& dev_ctx, const DenseTensor& x, @@ -27,8 +39,6 @@ void StridedSliceGradKernel(const Context& dev_ctx, const IntArray& starts, const IntArray& ends, const IntArray& strides, - const std::vector& infer_flags, - const std::vector& decrease_axis, DenseTensor* x_grad); template diff --git a/paddle/phi/kernels/strided_slice_kernel.cc b/paddle/phi/kernels/strided_slice_kernel.cc new file mode 100644 index 00000000000000..547d574cd78d04 --- /dev/null +++ b/paddle/phi/kernels/strided_slice_kernel.cc @@ -0,0 +1,60 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/phi/kernels/strided_slice_kernel.h" + +#include "paddle/phi/core/kernel_registry.h" + +namespace phi { + +template +void StridedSliceKernel(const Context& dev_ctx, + const DenseTensor& x, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + DenseTensor* out) { + std::vector infer_flags(axes.size(), 1); + std::vector decrease_axis; + StridedSliceRawKernel( + dev_ctx, x, axes, starts, ends, strides, infer_flags, decrease_axis, out); +} + +} // namespace phi + +PD_REGISTER_KERNEL(strided_slice, + CPU, + ALL_LAYOUT, + phi::StridedSliceKernel, + bool, + int, + int64_t, + float, + double, + phi::dtype::complex, + phi::dtype::complex) {} +#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) +PD_REGISTER_KERNEL(strided_slice, + GPU, + ALL_LAYOUT, + phi::StridedSliceKernel, + bool, + int, + int64_t, + float, + double, + phi::dtype::complex, + phi::dtype::complex) {} +#endif diff --git a/paddle/phi/kernels/strided_slice_kernel.h b/paddle/phi/kernels/strided_slice_kernel.h index fd90d81b8556c2..2c8b373bf03a85 100644 --- a/paddle/phi/kernels/strided_slice_kernel.h +++ b/paddle/phi/kernels/strided_slice_kernel.h @@ -19,6 +19,17 @@ namespace phi { +template +void StridedSliceRawKernel(const Context& dev_ctx, + const DenseTensor& x, + const std::vector& axes, + const IntArray& starts, + const IntArray& ends, + const IntArray& strides, + const std::vector& infer_flags, + const std::vector& decrease_axis, + DenseTensor* out); + template void StridedSliceKernel(const Context& dev_ctx, const DenseTensor& x, @@ -26,8 +37,6 @@ void StridedSliceKernel(const Context& dev_ctx, const IntArray& starts, const IntArray& ends, const IntArray& strides, - const std::vector& infer_flags, - const std::vector& decrease_axis, DenseTensor* out); template diff --git a/paddle/phi/ops/compat/strided_slice_sig.cc b/paddle/phi/ops/compat/strided_slice_sig.cc index 70ce2e3e07ce90..9fb70af0dea515 100644 --- a/paddle/phi/ops/compat/strided_slice_sig.cc +++ b/paddle/phi/ops/compat/strided_slice_sig.cc @@ -57,14 +57,14 @@ KernelSignature StridedSliceOpArgumentMapping( "decrease_axis"}; paddle::SmallVector outputs = {"Out"}; - std::string op_type; + std::string kernel_name; if (ctx.IsDenseTensorVectorInput("Input")) { - op_type = "strided_slice_array"; + kernel_name = "strided_slice_array"; } else { - op_type = "strided_slice"; + kernel_name = "strided_slice_raw"; } // NOTE(dev): Use this to avoid regularization. - KernelSignature sig(op_type, inputs, attrs, outputs); + KernelSignature sig(kernel_name, inputs, attrs, outputs); return sig; } @@ -106,15 +106,15 @@ KernelSignature StridedSliceGradOpArgumentMapping( "decrease_axis"}; paddle::SmallVector outputs = {GradVarName("Input")}; - std::string op_type; + std::string kernel_name; if (ctx.IsDenseTensorVectorInput("Input")) { - op_type = "strided_slice_array_grad"; + kernel_name = "strided_slice_array_grad"; } else { - op_type = "strided_slice_grad"; + kernel_name = "strided_slice_raw_grad"; } // NOTE(dev): Use this to avoid regularization. 
- KernelSignature sig(op_type, inputs, attrs, outputs); + KernelSignature sig(kernel_name, inputs, attrs, outputs); return sig; } @@ -132,573 +132,273 @@ NOTE: The following codes are for 'get_compat_kernel_signature.py' ############################ Forward ############################ -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensor", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", 
"ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "StartsTensorList", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice}", {"Input"}, +return KernelSignature("strided_slice_raw", {"Input"}, {"axes", "starts", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "EndsTensorList", "starts","infer_flags", 
"decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensor", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "StartsTensorList", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensor", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensor", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensor", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensorList", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return 
KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensorList", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "EndsTensorList", "starts","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "ends", "StartsTensor","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "ends", "StartsTensorList","infer_flags", "decrease_axis"}, {"Out"}); -return KernelSignature("{strided_slice_array}", {"Input"}, +return KernelSignature("strided_slice_array", {"Input"}, {"axes", "starts", "ends", "starts","infer_flags", "decrease_axis"}, {"Out"}); - -############################ Backward ############################ - - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", 
-"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"starts","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensorList", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "EndsTensorList", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "ends", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_grad}", {"Input", GradVarName("Out")}, - {"axes", "starts", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", 
-"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "EndsTensorList", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensor", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"StartsTensor","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "EndsTensorList", -"starts","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "StartsTensorList", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "EndsTensor", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - 
-return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "EndsTensorList", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "EndsTensorList", -"StartsTensorList","infer_flags", "decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "EndsTensorList", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "ends", "StartsTensor","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "ends", "StartsTensorList","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); - -return KernelSignature("{strided_slice_array_grad}", {"Input", -GradVarName("Out")}, - {"axes", "starts", "ends", "starts","infer_flags", -"decrease_axis"}, - {GradVarName("Input")}); */ diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index cb3781d5c299b8..0be014394f851b 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -11426,6 +11426,10 @@ def strided_slice(input, axes, starts, ends, strides): sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2) # sliced_2 is input[:, 0:3:1, 0:2:1, 2:4:2]. """ + if in_dygraph_mode(): + return _C_ops.final_state_strided_slice(input, axes, starts, ends, + strides) + helper = LayerHelper('strided_slice', **locals()) check_variable_and_dtype(input, 'input', @@ -11590,7 +11594,11 @@ def shape(input): res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) print(res) # [array([ 3, 100, 100], dtype=int32)] """ - if _non_static_mode(): + if in_dygraph_mode(): + out = _C_ops.final_state_shape(input) + out.stop_gradient = True + return out + if _in_legacy_dygraph(): out = _C_ops.shape(input) out.stop_gradient = True return out diff --git a/python/paddle/fluid/tests/unittests/test_shape_op.py b/python/paddle/fluid/tests/unittests/test_shape_op.py index bada62e3239ead..3d961a7413ca00 100644 --- a/python/paddle/fluid/tests/unittests/test_shape_op.py +++ b/python/paddle/fluid/tests/unittests/test_shape_op.py @@ -17,6 +17,7 @@ import unittest import numpy as np from op_test import OpTest +import paddle from paddle.fluid import core from paddle.fluid.op import Operator @@ -24,6 +25,7 @@ class TestShapeOp(OpTest): def setUp(self): self.op_type = "shape" + self.python_api = paddle.shape self.config() self.shape = [2, 3] input = np.zeros(self.shape) @@ -34,7 +36,7 @@ def config(self): self.shape = [2, 3] def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) class case1(TestShapeOp): diff --git a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py index e9be6b338fb863..ae17cb9b1b57ca 100644 --- a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py @@ -58,6 +58,7 @@ class TestStrideSliceOp(OpTest): def setUp(self): self.initTestCase() self.op_type = 'strided_slice' + self.python_api = paddle.strided_slice self.output = strided_slice_native_forward( self.input, 
self.axes, self.starts, self.ends, self.strides) @@ -72,10 +73,10 @@ def setUp(self): } def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(set(['Input']), 'Out') + self.check_grad(set(['Input']), 'Out', check_eager=True) def initTestCase(self): self.input = np.random.rand(100) @@ -704,7 +705,7 @@ def create_case(self, net): l2.sum().backward() grads_static = net.get_all_grads() net.clear_all_grad() - # compare result of dygraph and static + # compare result of dygraph and static self.is_grads_equal(grads_static, grads_dy) self.assertTrue( np.array_equal(s1, s2), diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml index 5499c81c7ecd97..c89e519f80f7aa 100644 --- a/python/paddle/utils/code_gen/api.yaml +++ b/python/paddle/utils/code_gen/api.yaml @@ -951,6 +951,14 @@ func : selu backward : selu_grad +- api : shape + args : (Tensor input) + output : Tensor + infer_meta : + func : ShapeInferMeta + kernel : + func : shape, shape_sr + # shard_index - api : shard_index args : (Tensor in, int index_num, int nshards, int shard_id, int ignore_value) @@ -1070,6 +1078,15 @@ func : square backward : square_grad +- api : strided_slice + args : (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides) + output : Tensor + infer_meta : + func : StridedSliceInferMeta + kernel : + func : strided_slice + backward : strided_slice_grad + - api : subtract args : (Tensor x, Tensor y) output : Tensor diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml index 5efe6e7451782e..3830d7f92689be 100644 --- a/python/paddle/utils/code_gen/backward.yaml +++ b/python/paddle/utils/code_gen/backward.yaml @@ -660,6 +660,16 @@ kernel : func : square_grad +- backward_api : strided_slice_grad + forward : strided_slice (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides) -> Tensor(out) + args : (Tensor x, Tensor out_grad, int[] axes, IntArray starts, IntArray ends, IntArray strides) + output : Tensor(x_grad) + infer_meta : + func : GeneralUnaryGradInferMeta + param : [x] + kernel : + func : strided_slice_grad + - backward_api : subtract_grad forward : subtract (Tensor x, Tensor y) -> Tensor(out) args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1) From e6a19aea92094c1819efa1b9b55acc4f38d65d29 Mon Sep 17 00:00:00 2001 From: zhiboniu <31800336+zhiboniu@users.noreply.github.com> Date: Fri, 1 Apr 2022 10:25:44 +0800 Subject: [PATCH 3/6] add framework._non_static_mode temporarily for hackson; test=document_fix (#41220) --- python/paddle/framework/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py index f4a4052ee5e157..2f8c23187e8d13 100644 --- a/python/paddle/framework/__init__.py +++ b/python/paddle/framework/__init__.py @@ -47,6 +47,7 @@ from ..fluid.dygraph.base import enable_dygraph as disable_static # noqa: F401 from ..fluid.dygraph.base import disable_dygraph as enable_static # noqa: F401 from ..fluid.framework import _non_static_mode as in_dynamic_mode # noqa: F401 +from ..fluid.framework import _non_static_mode # noqa: F401; temporary used for hackson from ..fluid.framework import _current_expected_place, _get_paddle_place # noqa: F401 from ..fluid.framework import dygraph_only # noqa: F401 from ..fluid.framework import convert_np_dtype_to_dtype_, _varbase_creator, OpProtoHolder # noqa: F401 From 5dae6da0245fec9d1805c42655439851917e229f 
Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Fri, 1 Apr 2022 11:06:46 +0800 Subject: [PATCH 4/6] [new-exec] move WaitEvent/RecordEvent into try-catch (#41222) * move WaitEvent/RecordEvent into try-catch * refine supportNpu --- .../framework/new_executor/interpretercore.cc | 11 ++-- paddle/fluid/framework/operator.cc | 50 +++++++++++++++++++ paddle/fluid/framework/operator.h | 35 ++----------- 3 files changed, 60 insertions(+), 36 deletions(-) diff --git a/paddle/fluid/framework/new_executor/interpretercore.cc b/paddle/fluid/framework/new_executor/interpretercore.cc index e30dd21fc5c0ed..a2f9d904067362 100644 --- a/paddle/fluid/framework/new_executor/interpretercore.cc +++ b/paddle/fluid/framework/new_executor/interpretercore.cc @@ -501,7 +501,7 @@ void InterpreterCore::RunInstruction(const Instruction& instr_node) { } // for debug nan/inf - if (FLAGS_check_nan_inf) { + if (op_with_kernel != nullptr && FLAGS_check_nan_inf) { VLOG(4) << "Check nan/inf"; framework::details::CheckOpHasNanOrInf( *op, *global_scope_, @@ -542,10 +542,12 @@ void InterpreterCore::ExecuteInstructionList( if (exception_holder_.Type() != "EOF") { async_work_queue_->Cancel(); } + VLOG(4) << "Cancel ok"; PADDLE_ENFORCE_EQ( main_thread_blocker_.Clear(), 0, platform::errors::PreconditionNotMet( "main_thread_blocker_.Clear() return -1, clear failed")); + VLOG(4) << "clear ok"; exception_holder_.ReThrow(); } } @@ -637,15 +639,18 @@ void InterpreterCore::RunInstructionAsync( auto* op = instr_node.OpBase(); platform::RecordEvent instruction_event( op->Type(), platform::TracerEventType::Operator, 1); - interpreter::WaitEvent(instr_node, place_); try { + interpreter::WaitEvent(instr_node, place_); + RunInstruction(instr_node); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) RecordStreamForGC(instr_node); #endif CheckGC(instr_node, atomic_var_ref); + + interpreter::RecordEvent(instr_node, place_); } catch (platform::EnforceNotMet& ex) { framework::InsertCallStackInfo(op->Type(), op->Attrs(), &ex); exception_holder_.Catch(std::make_exception_ptr(std::move(ex))); @@ -677,8 +682,6 @@ void InterpreterCore::RunInstructionAsync( } } - interpreter::RecordEvent(instr_node, place_); - RunNextInstructions(instr_node, &ready_ops, atomic_deps, atomic_var_ref); } } diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 4183360f655a7b..efb334ebbd9e5a 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -1120,6 +1120,56 @@ static void CheckTensorNANOrInf(const std::string& op_type, op_type, name)); } +bool OperatorWithKernel::SupportGPU() const { + auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap( + phi::TransToPhiKernelName(type_)); + auto has_phi_kernel = + std::any_of(phi_kernels.begin(), phi_kernels.end(), + [](phi::KernelKeyMap::const_reference kern_pair) { + return kern_pair.first.backend() == phi::Backend::GPU; + }); + if (has_phi_kernel) { + return true; + } else { + auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_); + if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) { + return false; + } else { + auto& op_kernels = kernel_iter->second; + return std::any_of( + op_kernels.begin(), op_kernels.end(), + [](OpKernelMap::const_reference kern_pair) { + return platform::is_gpu_place(kern_pair.first.place_); + }); + } + } +} + +bool OperatorWithKernel::SupportNPU() const { + auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap( + phi::TransToPhiKernelName(type_)); + auto has_phi_kernel = + 
std::any_of(phi_kernels.begin(), phi_kernels.end(), + [](phi::KernelKeyMap::const_reference kern_pair) { + return kern_pair.first.backend() == phi::Backend::NPU; + }); + if (has_phi_kernel) { + return true; + } else { + auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_); + if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) { + return false; + } else { + auto& op_kernels = kernel_iter->second; + return std::any_of( + op_kernels.begin(), op_kernels.end(), + [](OpKernelMap::const_reference kern_pair) { + return platform::is_npu_place(kern_pair.first.place_); + }); + } + } +} + bool OperatorWithKernel::SupportsMKLDNN( const proto::VarType::Type data_type) const { auto op_kernel_iter = OperatorWithKernel::AllOpKernels().find(type_); diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index ce22f099447785..f7fc83f1d6d30f 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -560,39 +560,10 @@ class OperatorWithKernel : public OperatorBase { return g_all_op_kernels; } - bool SupportGPU() const override { - auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap( - phi::TransToPhiKernelName(type_)); - auto has_phi_kernel = - std::any_of(phi_kernels.begin(), phi_kernels.end(), - [](phi::KernelKeyMap::const_reference kern_pair) { - return kern_pair.first.backend() == phi::Backend::GPU; - }); - if (has_phi_kernel) { - return true; - } else { - auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_); - if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) { - return false; - } else { - auto& op_kernels = kernel_iter->second; - return std::any_of( - op_kernels.begin(), op_kernels.end(), - [](OpKernelMap::const_reference kern_pair) { - return platform::is_gpu_place(kern_pair.first.place_); - }); - } - } - } + bool SupportGPU() const override; + + bool SupportNPU() const override; - bool SupportNPU() const override { - // TODO(zhiqiu): support phi if needed? - auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_); - return std::any_of(op_kernels.begin(), op_kernels.end(), - [](OpKernelMap::const_reference kern_pair) { - return platform::is_npu_place(kern_pair.first.place_); - }); - } bool SupportMLU() const override { // TODO(zhiqiu): support phi if needed? 
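// Note: unlike SupportGPU/SupportNPU, which are now defined in operator.cc and
// also consult the phi kernel factory, SupportMLU still only scans the fluid
// OpKernel map.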
auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_); From 8aef685b1c6fef4a9fcb976f9b4630d2d36be704 Mon Sep 17 00:00:00 2001 From: From00 Date: Fri, 1 Apr 2022 11:20:27 +0800 Subject: [PATCH 5/6] Fix compilation errors for gcc-54 (#41228) * Fix compilation error for gcc-54 * Remove const for gpuStream_t --- .../memory/allocation/allocator_facade.cc | 24 +++++++++---------- .../memory/allocation/allocator_facade.h | 13 ++++------ .../allocation/stream_safe_cuda_allocator.cc | 10 ++++---- .../allocation/stream_safe_cuda_allocator.h | 10 ++++---- paddle/fluid/memory/malloc.cc | 7 +++--- paddle/fluid/memory/malloc.h | 8 +++---- 6 files changed, 32 insertions(+), 40 deletions(-) diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc index 7619767123f84e..f4dfb76884f175 100644 --- a/paddle/fluid/memory/allocation/allocator_facade.cc +++ b/paddle/fluid/memory/allocation/allocator_facade.cc @@ -354,8 +354,7 @@ class AllocatorFacadePrivate { } #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - bool HasCUDAAllocator(const platform::CUDAPlace& place, - const gpuStream_t& stream) { + bool HasCUDAAllocator(const platform::CUDAPlace& place, gpuStream_t stream) { auto it = cuda_allocators_.find(place); if (it == cuda_allocators_.end()) { return false; @@ -366,7 +365,7 @@ class AllocatorFacadePrivate { } const std::shared_ptr& GetAllocator( - const platform::CUDAPlace& place, const gpuStream_t& stream, + const platform::CUDAPlace& place, gpuStream_t stream, bool create_if_not_found = false) { if (LIKELY(!IsCUDAGraphCapturing())) { if (stream == GetDefaultStream(place)) { @@ -407,14 +406,13 @@ class AllocatorFacadePrivate { return iter->second; } - const gpuStream_t& GetDefaultStream(const platform::CUDAPlace& place) const { + gpuStream_t GetDefaultStream(const platform::CUDAPlace& place) const { const std::shared_ptr& allocator = GetDefaultStreamSafeCUDAAllocator(place); return allocator->GetDefaultStream(); } - void SetDefaultStream(const platform::CUDAPlace& place, - const gpuStream_t& stream) { + void SetDefaultStream(const platform::CUDAPlace& place, gpuStream_t stream) { const std::shared_ptr& allocator = GetDefaultStreamSafeCUDAAllocator(place); allocator->SetDefaultStream(stream); @@ -424,7 +422,7 @@ class AllocatorFacadePrivate { } void RecordStream(std::shared_ptr allocation, - const gpuStream_t& stream) { + gpuStream_t stream) { std::shared_ptr stream_safe_cuda_allocation = std::dynamic_pointer_cast(allocation); if (stream_safe_cuda_allocation != nullptr) { @@ -434,7 +432,7 @@ class AllocatorFacadePrivate { } } - const gpuStream_t GetStream( + gpuStream_t GetStream( const std::shared_ptr& allocation) const { const std::shared_ptr stream_safe_cuda_allocation = @@ -1044,7 +1042,7 @@ bool AllocatorFacade::IsStreamSafeCUDAAllocatorUsed() { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) uint64_t AllocatorFacade::Release(const platform::CUDAPlace& place, - const gpuStream_t& stream) { + gpuStream_t stream) { AllocatorFacadePrivate* m = GetPrivate(); if (!m->IsStreamSafeCUDAAllocatorUsed()) { VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!"; @@ -1055,12 +1053,12 @@ uint64_t AllocatorFacade::Release(const platform::CUDAPlace& place, } void AllocatorFacade::RecordStream(std::shared_ptr allocation, - const gpuStream_t& stream) { + gpuStream_t stream) { GetPrivate()->RecordStream(allocation, stream); } const std::shared_ptr& AllocatorFacade::GetAllocator( - const platform::Place& place, const gpuStream_t& 
stream) { + const platform::Place& place, gpuStream_t stream) { AllocatorFacadePrivate* m = GetPrivate(); if (!m->IsStreamSafeCUDAAllocatorUsed()) { @@ -1075,13 +1073,13 @@ const std::shared_ptr& AllocatorFacade::GetAllocator( return m->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1); } -const gpuStream_t AllocatorFacade::GetStream( +gpuStream_t AllocatorFacade::GetStream( const std::shared_ptr& allocation) const { return GetPrivate()->GetStream(allocation); } void AllocatorFacade::SetDefaultStream(const platform::CUDAPlace& place, - const gpuStream_t& stream) { + gpuStream_t stream) { if (m_->IsStreamSafeCUDAAllocatorUsed()) { m_->SetDefaultStream(place, stream); } diff --git a/paddle/fluid/memory/allocation/allocator_facade.h b/paddle/fluid/memory/allocation/allocator_facade.h index d5c1e7c908c79a..1dea50edccf2eb 100644 --- a/paddle/fluid/memory/allocation/allocator_facade.h +++ b/paddle/fluid/memory/allocation/allocator_facade.h @@ -80,15 +80,12 @@ class AllocatorFacade { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) // TODO(zhiqiu): change gpuStream_t to phi::Stream if needed. - uint64_t Release(const platform::CUDAPlace& place, const gpuStream_t& stream); - void RecordStream(std::shared_ptr allocation, - const gpuStream_t& stream); + uint64_t Release(const platform::CUDAPlace& place, gpuStream_t stream); + void RecordStream(std::shared_ptr allocation, gpuStream_t stream); const std::shared_ptr& GetAllocator(const platform::Place& place, - const gpuStream_t& stream); - const gpuStream_t GetStream( - const std::shared_ptr& allocation) const; - void SetDefaultStream(const platform::CUDAPlace& place, - const gpuStream_t& stream); + gpuStream_t stream); + gpuStream_t GetStream(const std::shared_ptr& allocation) const; + void SetDefaultStream(const platform::CUDAPlace& place, gpuStream_t stream); #endif #ifdef PADDLE_WITH_CUDA diff --git a/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.cc b/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.cc index 7e47d35176bac5..82233fd4fe8214 100644 --- a/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.cc +++ b/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.cc @@ -33,7 +33,7 @@ StreamSafeCUDAAllocation::StreamSafeCUDAAllocation( owning_stream_(std::move(owning_stream)), allocator_(allocator->shared_from_this()) {} -void StreamSafeCUDAAllocation::RecordStream(const gpuStream_t& stream) { +void StreamSafeCUDAAllocation::RecordStream(gpuStream_t stream) { VLOG(8) << "Try record stream " << stream << " for address " << ptr(); if (stream == owning_stream_) { return; @@ -90,7 +90,7 @@ bool StreamSafeCUDAAllocation::CanBeFreed() { return true; } -const gpuStream_t& StreamSafeCUDAAllocation::GetOwningStream() const { +gpuStream_t StreamSafeCUDAAllocation::GetOwningStream() const { return owning_stream_; } @@ -102,7 +102,7 @@ void StreamSafeCUDAAllocation::RecordGraphCapturingStreams() { } void StreamSafeCUDAAllocation::RecordStreamWithNoGraphCapturing( - const gpuStream_t& stream) { + gpuStream_t stream) { gpuEvent_t record_event; auto it = outstanding_event_map_.find(stream); if (it == outstanding_event_map_.end()) { @@ -154,11 +154,11 @@ StreamSafeCUDAAllocator::~StreamSafeCUDAAllocator() { bool StreamSafeCUDAAllocator::IsAllocThreadSafe() const { return true; } -const gpuStream_t& StreamSafeCUDAAllocator::GetDefaultStream() const { +gpuStream_t StreamSafeCUDAAllocator::GetDefaultStream() const { return default_stream_; } -void StreamSafeCUDAAllocator::SetDefaultStream(const gpuStream_t& 
stream) { +void StreamSafeCUDAAllocator::SetDefaultStream(gpuStream_t stream) { default_stream_ = stream; } diff --git a/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h b/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h index 65af32c701b756..32d3896e66bbf3 100644 --- a/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h +++ b/paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h @@ -39,13 +39,13 @@ class StreamSafeCUDAAllocation : public Allocation { gpuStream_t owning_stream, StreamSafeCUDAAllocator *allocator); - void RecordStream(const gpuStream_t &stream); + void RecordStream(gpuStream_t stream); bool CanBeFreed(); - const gpuStream_t &GetOwningStream() const; + gpuStream_t GetOwningStream() const; private: void RecordGraphCapturingStreams(); - void RecordStreamWithNoGraphCapturing(const gpuStream_t &stream); + void RecordStreamWithNoGraphCapturing(gpuStream_t stream); DecoratedAllocationPtr underlying_allocation_; std::set graph_capturing_stream_set_; std::map outstanding_event_map_; @@ -66,8 +66,8 @@ class StreamSafeCUDAAllocator ~StreamSafeCUDAAllocator(); bool IsAllocThreadSafe() const override; - const gpuStream_t &GetDefaultStream() const; - void SetDefaultStream(const gpuStream_t &stream); + gpuStream_t GetDefaultStream() const; + void SetDefaultStream(gpuStream_t stream); protected: phi::Allocation *AllocateImpl(size_t size) override; diff --git a/paddle/fluid/memory/malloc.cc b/paddle/fluid/memory/malloc.cc index f3de317dd1df5a..50180b4b6a1a6a 100644 --- a/paddle/fluid/memory/malloc.cc +++ b/paddle/fluid/memory/malloc.cc @@ -57,17 +57,16 @@ void* GetBasePtr(const std::shared_ptr& allocation) { } #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -uint64_t Release(const platform::CUDAPlace& place, const gpuStream_t& stream) { +uint64_t Release(const platform::CUDAPlace& place, gpuStream_t stream) { return allocation::AllocatorFacade::Instance().Release(place, stream); } -void RecordStream(std::shared_ptr allocation, - const gpuStream_t& stream) { +void RecordStream(std::shared_ptr allocation, gpuStream_t stream) { return allocation::AllocatorFacade::Instance().RecordStream(allocation, stream); } -const gpuStream_t GetStream(const std::shared_ptr& allocation) { +gpuStream_t GetStream(const std::shared_ptr& allocation) { return allocation::AllocatorFacade::Instance().GetStream(allocation); } diff --git a/paddle/fluid/memory/malloc.h b/paddle/fluid/memory/malloc.h index e6d910579ba95c..796bdcf0ec2f68 100644 --- a/paddle/fluid/memory/malloc.h +++ b/paddle/fluid/memory/malloc.h @@ -50,13 +50,11 @@ extern bool InSameStream(const std::shared_ptr& allocation, extern void* GetBasePtr(const std::shared_ptr& allocation); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -extern uint64_t Release(const platform::CUDAPlace& place, - const gpuStream_t& stream); +extern uint64_t Release(const platform::CUDAPlace& place, gpuStream_t stream); -void RecordStream(std::shared_ptr allocation, - const gpuStream_t& stream); +void RecordStream(std::shared_ptr allocation, gpuStream_t stream); -const gpuStream_t GetStream(const std::shared_ptr& allocation); +gpuStream_t GetStream(const std::shared_ptr& allocation); #endif } // namespace memory } // namespace paddle From 0b0c27685eb2a57440f763e9cf095880bd46dac6 Mon Sep 17 00:00:00 2001 From: yaoxuefeng Date: Fri, 1 Apr 2022 11:28:42 +0800 Subject: [PATCH 6/6] modify api name of ps accessor (#41207) * modify api name of ps accessor * update * code format --- .../distributed/ps/service/brpc_ps_client.cc | 
4 +- .../distributed/ps/service/brpc_ps_server.cc | 5 +- .../fluid/distributed/ps/service/ps_client.cc | 4 +- .../distributed/ps/service/ps_local_client.cc | 8 +- paddle/fluid/distributed/ps/table/accessor.h | 69 ++---- .../ps/table/common_dense_table.cc | 8 +- .../distributed/ps/table/ctr_accessor.cc | 169 ++++++++------- .../fluid/distributed/ps/table/ctr_accessor.h | 152 ++++++------- .../ps/table/ctr_double_accessor.cc | 204 +++++++++--------- .../ps/table/ctr_double_accessor.h | 170 +++++++-------- .../ps/table/downpour_ctr_accessor.cc | 191 ++++++++-------- .../ps/table/downpour_ctr_accessor.h | 160 +++++++------- .../ps/table/memory_sparse_table.cc | 64 +++--- .../distributed/ps/table/sparse_accessor.cc | 161 +++++++------- .../distributed/ps/table/sparse_accessor.h | 140 ++++++------ paddle/fluid/distributed/ps/table/table.cc | 2 +- .../distributed/ps/table/tensor_accessor.cc | 66 +++--- .../distributed/ps/table/tensor_accessor.h | 42 ++-- .../distributed/test/ctr_accessor_test.cc | 66 +++--- 19 files changed, 811 insertions(+), 874 deletions(-) mode change 100755 => 100644 paddle/fluid/distributed/ps/service/ps_local_client.cc diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc index d7d41d6bbd4a80..5a92afb297c7e2 100755 --- a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc +++ b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc @@ -1520,7 +1520,7 @@ void sparse_local_merge(ValueAccessor *accessor, float *merge_data, merge_data_shell[i] = merge_data + i; another_data_shell[i] = another_data + i; } - accessor->merge(merge_data_shell, another_data_shell, 1); + accessor->Merge(merge_data_shell, another_data_shell, 1); } int BrpcPsClient::push_sparse_async_shard_merge( @@ -1759,7 +1759,7 @@ void BrpcPsClient::push_dense_task_consume() { async_task]() -> int { auto &tmp_task_vec = *(async_task->data()); const float *merge_data = tmp_task_vec.data(); - accessor->merge(&total_send_data, &merge_data, + accessor->Merge(&total_send_data, &merge_data, total_send_data_size); #pragma optimize("", off) auto *debug_closure = closure; diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc index 0d7624baec5806..2e77020c307517 100644 --- a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc +++ b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc @@ -206,7 +206,8 @@ int32_t BrpcPsService::pull_dense(Table *table, const PsRequestMessage &request, } auto res_data = butil::get_object>(); - res_data->resize(num * table->value_accesor()->select_size() / sizeof(float)); + res_data->resize(num * table->value_accesor()->GetTableInfo(SELECT_SIZE) / + sizeof(float)); TableContext table_context; table_context.value_type = Dense; table_context.pull_context.values = res_data->data(); @@ -385,7 +386,7 @@ int32_t BrpcPsService::pull_sparse(Table *table, CostTimer timer("pserver_server_pull_sparse"); uint32_t num = *(uint32_t *)(request.params(0).c_str()); - auto dim = table->value_accesor()->select_dim(); + auto dim = table->value_accesor()->GetTableInfo(SELECT_DIM); thread_local std::string req_buffer; req_buffer.reserve(req_buffer_size); diff --git a/paddle/fluid/distributed/ps/service/ps_client.cc b/paddle/fluid/distributed/ps/service/ps_client.cc index fd956b758de1ae..27f2d88fdd9fa0 100644 --- a/paddle/fluid/distributed/ps/service/ps_client.cc +++ b/paddle/fluid/distributed/ps/service/ps_client.cc @@ -46,8 +46,8 @@ int32_t PSClient::configure( auto *accessor 
= CREATE_PSCORE_CLASS( ValueAccessor, work_param.downpour_table_param(i).accessor().accessor_class()); - accessor->configure(work_param.downpour_table_param(i).accessor()); - accessor->initialize(); + accessor->Configure(work_param.downpour_table_param(i).accessor()); + accessor->Initialize(); _table_accessors[work_param.downpour_table_param(i).table_id()].reset( accessor); } diff --git a/paddle/fluid/distributed/ps/service/ps_local_client.cc b/paddle/fluid/distributed/ps/service/ps_local_client.cc old mode 100755 new mode 100644 index fe5cbe682ea67c..dbf47f0df41161 --- a/paddle/fluid/distributed/ps/service/ps_local_client.cc +++ b/paddle/fluid/distributed/ps/service/ps_local_client.cc @@ -174,7 +174,8 @@ ::std::future PsLocalClient::pull_dense(Region* regions, auto* accessor = table_accessor(table_id); auto* table_ptr = table(table_id); - uint32_t num_per_shard = dense_dim_per_shard(accessor->fea_dim(), 1); + uint32_t num_per_shard = + dense_dim_per_shard(accessor->GetTableInfo(FEA_DIM), 1); std::vector region_buffer; region_buffer.resize(num_per_shard); table_ptr->pull_dense(region_buffer.data(), region_buffer.size()); @@ -219,7 +220,8 @@ ::std::future PsLocalClient::push_dense_param(const Region* regions, auto* table_ptr = table(table_id); std::vector region_buffer; - region_buffer.resize(dense_dim_per_shard(accessor->fea_dim(), 1), 0); + region_buffer.resize(dense_dim_per_shard(accessor->GetTableInfo(FEA_DIM), 1), + 0); for (size_t i = 0, offset = 0; i < region_num; ++i) { uint32_t data_num = regions[i].size / sizeof(float); memcpy(region_buffer.data() + offset, regions[i].data, regions[i].size); @@ -252,7 +254,7 @@ ::std::future PsLocalClient::push_dense(const Region* regions, auto* table_ptr = table(table_id); std::vector region_buffer; - region_buffer.resize(dense_dim_per_shard(accessor->fea_dim(), 1)); + region_buffer.resize(dense_dim_per_shard(accessor->GetTableInfo(FEA_DIM), 1)); size_t data_size = region_buffer.size(); for (size_t i = 0, offset = 0; i < region_num; ++i) { uint32_t data_num = regions[i].size / sizeof(float); diff --git a/paddle/fluid/distributed/ps/table/accessor.h b/paddle/fluid/distributed/ps/table/accessor.h index 207cc94b4cb154..efc1e604dc9d01 100644 --- a/paddle/fluid/distributed/ps/table/accessor.h +++ b/paddle/fluid/distributed/ps/table/accessor.h @@ -72,7 +72,7 @@ class ValueAccessor { ValueAccessor() {} virtual ~ValueAccessor() {} - virtual int configure(const TableAccessorParameter& parameter) { + virtual int Configure(const TableAccessorParameter& parameter) { _config = parameter; // data_convert结构体初始化 if (_config.table_accessor_save_param_size() != 0) { @@ -88,38 +88,15 @@ class ValueAccessor { } return 0; } - virtual int initialize() = 0; + virtual int Initialize() = 0; virtual void SetTableInfo(AccessorInfo& info) = 0; virtual size_t GetTableInfo(InfoKey key) = 0; - // value维度 - virtual size_t dim() = 0; - // value各个维度的size - virtual size_t dim_size(size_t dim) = 0; - // value各维度相加总size - virtual size_t size() = 0; - - // value中mf动态长度部分总size大小, sparse下生效 - virtual size_t mf_size() { return 0; } - virtual bool need_extend_mf(float* value) { return false; } - virtual bool has_mf(size_t size) { return false; } - // pull value维度 - virtual size_t select_dim() = 0; - // pull value各个维度的size - virtual size_t select_dim_size(size_t dim) = 0; - // pull value各维度相加总size - virtual size_t select_size() = 0; - // push value维度 - virtual size_t update_dim() = 0; - // push value各个维度的size - virtual size_t update_dim_size(size_t dim) = 0; - // push value各维度相加总size - 
virtual size_t update_size() = 0; - // fea total for dense - virtual size_t fea_dim() { return _config.fea_dim(); } + virtual bool NeedExtendMF(float* value) { return false; } + virtual bool HasMF(size_t size) { return false; } // converter for save - virtual std::string get_converter(int param) { + virtual std::string GetConverter(int param) { auto itr = _data_coverter_map.find(param); if (itr == _data_coverter_map.end()) { return ""; @@ -128,7 +105,7 @@ class ValueAccessor { } } // deconverter for load - virtual std::string get_deconverter(int param) { + virtual std::string GetDeconverter(int param) { auto itr = _data_coverter_map.find(param); if (itr == _data_coverter_map.end()) { return ""; @@ -137,47 +114,47 @@ class ValueAccessor { } } // 判断该value是否进行shrink - virtual bool shrink(float* value) = 0; + virtual bool Shrink(float* value) = 0; // 判断该value是否在save阶段dump, // param作为参数用于标识save阶段,如downpour的xbox与batch_model - virtual bool save(float* value, int param) = 0; + virtual bool Save(float* value, int param) = 0; // update delta_score and unseen_days after save - virtual void update_stat_after_save(float* value, int param) {} + virtual void UpdateStatAfterSave(float* value, int param) {} // keys不存在时,为values生成随机值 - virtual int32_t create(float** value, size_t num) = 0; - virtual bool create_value(int type, const float* value) { return true; } + virtual int32_t Create(float** value, size_t num) = 0; + virtual bool CreateValue(int type, const float* value) { return true; } // 从values中选取到select_values中 - virtual int32_t select(float** select_values, const float** values, + virtual int32_t Select(float** select_values, const float** values, size_t num) = 0; // 将update_values聚合到一起 - virtual int32_t merge(float** update_values, + virtual int32_t Merge(float** update_values, const float** other_update_values, size_t num) = 0; // 将update_values聚合到一起,通过it.next判定是否进入下一个key - // virtual int32_t merge(float** update_values, iterator it); + // virtual int32_t Merge(float** update_values, iterator it); // 将update_values更新应用到values中 - virtual int32_t update(float** values, const float** update_values, + virtual int32_t Update(float** values, const float** update_values, size_t num) = 0; // used to save model, will filter feature - virtual std::string parse_to_string(const float* value, int param) = 0; + virtual std::string ParseToString(const float* value, int param) = 0; // parse value from string, used to load model - virtual int32_t parse_from_string(const std::string& data, float* value) = 0; + virtual int32_t ParseFromString(const std::string& data, float* value) = 0; - virtual FsDataConverter converter(int param) { + virtual FsDataConverter Converter(int param) { FsDataConverter data_convert; - data_convert.converter = this->get_converter(param); - data_convert.deconverter = this->get_deconverter(param); + data_convert.converter = this->GetConverter(param); + data_convert.deconverter = this->GetDeconverter(param); return data_convert; } - virtual int set_weight(float** values, const float** update_values, - size_t num) { + virtual int SetWeight(float** values, const float** update_values, + size_t num) { return 0; } - virtual float get_field(float* value, const std::string& name) { return 0.0; } + virtual float GetField(float* value, const std::string& name) { return 0.0; } #define DEFINE_GET_INDEX(class, field) \ virtual int get_##field##_index() override { return class ::field##_index(); } diff --git a/paddle/fluid/distributed/ps/table/common_dense_table.cc 
b/paddle/fluid/distributed/ps/table/common_dense_table.cc index a462fc50aeb721..caec575e33eef1 100644 --- a/paddle/fluid/distributed/ps/table/common_dense_table.cc +++ b/paddle/fluid/distributed/ps/table/common_dense_table.cc @@ -232,9 +232,9 @@ int32_t CommonDenseTable::load(const std::string& path, int load_param = atoi(param.c_str()); FsChannelConfig channel_config; - channel_config.converter = _value_accesor->converter(load_param).converter; + channel_config.converter = _value_accesor->Converter(load_param).converter; channel_config.deconverter = - _value_accesor->converter(load_param).deconverter; + _value_accesor->Converter(load_param).deconverter; bool is_read_failed = false; int err_no = 0; int retry_num = 0; @@ -329,9 +329,9 @@ int32_t CommonDenseTable::save(const std::string& path, "%s/part-%03d", table_dir(path).c_str(), _shard_idx); } _afs_client.remove(channel_config.path); - channel_config.converter = _value_accesor->converter(save_param).converter; + channel_config.converter = _value_accesor->Converter(save_param).converter; channel_config.deconverter = - _value_accesor->converter(save_param).deconverter; + _value_accesor->Converter(save_param).deconverter; bool is_write_failed = false; std::vector> result_buffer_param( diff --git a/paddle/fluid/distributed/ps/table/ctr_accessor.cc b/paddle/fluid/distributed/ps/table/ctr_accessor.cc index ffb97914fb8c02..8380177963ed90 100644 --- a/paddle/fluid/distributed/ps/table/ctr_accessor.cc +++ b/paddle/fluid/distributed/ps/table/ctr_accessor.cc @@ -20,7 +20,7 @@ namespace paddle { namespace distributed { -int CtrCommonAccessor::initialize() { +int CtrCommonAccessor::Initialize() { auto name = _config.embed_sgd_param().name(); _embed_sgd_rule = CREATE_PSCORE_CLASS(SparseValueSGDRule, name); _embed_sgd_rule->load_config(_config.embed_sgd_param(), 1); @@ -39,73 +39,72 @@ int CtrCommonAccessor::initialize() { } void CtrCommonAccessor::SetTableInfo(AccessorInfo& info) { - info.dim = dim(); - info.size = size(); - info.select_dim = select_dim(); - info.select_size = select_size(); - info.update_dim = update_dim(); - info.update_size = update_size(); - info.mf_size = mf_size(); - info.fea_dim = fea_dim(); + info.dim = Dim(); + info.size = Size(); + info.select_dim = SelectDim(); + info.select_size = SelectSize(); + info.update_dim = UpdateDim(); + info.update_size = UpdateSize(); + info.mf_size = MFSize(); } size_t CtrCommonAccessor::GetTableInfo(InfoKey key) { switch (key) { case DIM: - return dim(); + return Dim(); case SIZE: - return size(); + return Size(); case SELECT_DIM: - return select_dim(); + return SelectDim(); case SELECT_SIZE: - return select_size(); + return SelectSize(); case UPDATE_DIM: - return update_dim(); + return UpdateDim(); case UPDATE_SIZE: - return update_size(); + return UpdateSize(); case MF_SIZE: - return mf_size(); - case FEA_DIM: - return fea_dim(); + return MFSize(); + default: + return 0; } return 0; } -size_t CtrCommonAccessor::dim() { return common_feature_value.dim(); } +size_t CtrCommonAccessor::Dim() { return common_feature_value.Dim(); } -size_t CtrCommonAccessor::dim_size(size_t dim) { +size_t CtrCommonAccessor::DimSize(size_t dim) { auto embedx_dim = _config.embedx_dim(); - return common_feature_value.dim_size(dim, embedx_dim); + return common_feature_value.DimSize(dim, embedx_dim); } -size_t CtrCommonAccessor::size() { return common_feature_value.size(); } +size_t CtrCommonAccessor::Size() { return common_feature_value.Size(); } -size_t CtrCommonAccessor::mf_size() { +size_t 
CtrCommonAccessor::MFSize() { return (_config.embedx_dim() + common_feature_value.embedx_sgd_dim) * sizeof(float); // embedx embedx_g2sum } // pull value -size_t CtrCommonAccessor::select_dim() { +size_t CtrCommonAccessor::SelectDim() { auto embedx_dim = _config.embedx_dim(); return 3 + embedx_dim; } -size_t CtrCommonAccessor::select_dim_size(size_t dim) { return sizeof(float); } +size_t CtrCommonAccessor::SelectDimSize(size_t dim) { return sizeof(float); } -size_t CtrCommonAccessor::select_size() { return select_dim() * sizeof(float); } +size_t CtrCommonAccessor::SelectSize() { return SelectDim() * sizeof(float); } // push value -size_t CtrCommonAccessor::update_dim() { +size_t CtrCommonAccessor::UpdateDim() { auto embedx_dim = _config.embedx_dim(); return 4 + embedx_dim; } -size_t CtrCommonAccessor::update_dim_size(size_t dim) { return sizeof(float); } +size_t CtrCommonAccessor::UpdateDimSize(size_t dim) { return sizeof(float); } -size_t CtrCommonAccessor::update_size() { return update_dim() * sizeof(float); } +size_t CtrCommonAccessor::UpdateSize() { return UpdateDim() * sizeof(float); } -bool CtrCommonAccessor::shrink(float* value) { +bool CtrCommonAccessor::Shrink(float* value) { auto base_threshold = _config.ctr_accessor_param().base_threshold(); auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); auto delete_after_unseen_days = @@ -113,12 +112,12 @@ bool CtrCommonAccessor::shrink(float* value) { auto delete_threshold = _config.ctr_accessor_param().delete_threshold(); // time_decay first - common_feature_value.show(value) *= _show_click_decay_rate; - common_feature_value.click(value) *= _show_click_decay_rate; + common_feature_value.Show(value) *= _show_click_decay_rate; + common_feature_value.Click(value) *= _show_click_decay_rate; // shrink after - auto score = show_click_score(common_feature_value.show(value), - common_feature_value.click(value)); + auto score = show_click_score(common_feature_value.Show(value), + common_feature_value.Click(value)); auto unseen_days = common_feature_value.unseen_days(value); if (score < delete_threshold || unseen_days > delete_after_unseen_days) { return true; @@ -126,7 +125,7 @@ bool CtrCommonAccessor::shrink(float* value) { return false; } -bool CtrCommonAccessor::save(float* value, int param) { +bool CtrCommonAccessor::Save(float* value, int param) { auto base_threshold = _config.ctr_accessor_param().base_threshold(); auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); @@ -142,8 +141,8 @@ bool CtrCommonAccessor::save(float* value, int param) { case 1: // save xbox base case 2: { - if (show_click_score(common_feature_value.show(value), - common_feature_value.click(value)) >= + if (show_click_score(common_feature_value.Show(value), + common_feature_value.Click(value)) >= base_threshold && common_feature_value.delta_score(value) >= delta_threshold && common_feature_value.unseen_days(value) <= delta_keep_days) { @@ -171,7 +170,7 @@ bool CtrCommonAccessor::save(float* value, int param) { } } -void CtrCommonAccessor::update_stat_after_save(float* value, int param) { +void CtrCommonAccessor::UpdateStatAfterSave(float* value, int param) { auto base_threshold = _config.ctr_accessor_param().base_threshold(); auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); @@ -180,8 +179,8 @@ void CtrCommonAccessor::update_stat_after_save(float* value, int param) { } 
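// ---------------------------------------------------------------------------
// Illustrative aside (an editor's sketch, not part of this patch): the
// Shrink()/show_click_score() code above reduces to one weighted score plus a
// staleness check. A minimal, self-contained C++ sketch of that rule follows;
// the constants below are hypothetical stand-ins for the fields the real code
// reads from TableAccessorParameter (nonclk_coeff, click_coeff,
// delete_threshold, delete_after_unseen_days, show_click_decay_rate).
//
//   float ShowClickScore(float show, float click) {
//     const float kNonclkCoeff = 0.1f;  // hypothetical nonclk_coeff
//     const float kClickCoeff = 1.0f;   // hypothetical click_coeff
//     return (show - click) * kNonclkCoeff + click * kClickCoeff;
//   }
//
//   // Mirrors the Shrink() flow shown above: decay show/click first, then
//   // drop the feature when its score is too low or it has gone unseen too
//   // long.
//   bool ShouldShrink(float* show, float* click, float unseen_days) {
//     const float kDecayRate = 0.98f;            // hypothetical decay rate
//     const float kDeleteThreshold = 0.8f;       // hypothetical threshold
//     const float kDeleteAfterUnseenDays = 30f;  // hypothetical day limit
//     *show *= kDecayRate;
//     *click *= kDecayRate;
//     return ShowClickScore(*show, *click) < kDeleteThreshold ||
//            unseen_days > kDeleteAfterUnseenDays;
//   }
// ---------------------------------------------------------------------------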
switch (param) { case 1: { - if (show_click_score(common_feature_value.show(value), - common_feature_value.click(value)) >= + if (show_click_score(common_feature_value.Show(value), + common_feature_value.Click(value)) >= base_threshold && common_feature_value.delta_score(value) >= delta_threshold && common_feature_value.unseen_days(value) <= delta_keep_days) { @@ -198,52 +197,52 @@ void CtrCommonAccessor::update_stat_after_save(float* value, int param) { } } -int32_t CtrCommonAccessor::create(float** values, size_t num) { +int32_t CtrCommonAccessor::Create(float** values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* value = values[value_item]; value[common_feature_value.unseen_days_index()] = 0; value[common_feature_value.delta_score_index()] = 0; - value[common_feature_value.show_index()] = 0; - value[common_feature_value.click_index()] = 0; - value[common_feature_value.slot_index()] = -1; + value[common_feature_value.ShowIndex()] = 0; + value[common_feature_value.ClickIndex()] = 0; + value[common_feature_value.SlotIndex()] = -1; _embed_sgd_rule->init_value( - value + common_feature_value.embed_w_index(), + value + common_feature_value.Embed_W_Index(), value + common_feature_value.embed_g2sum_index()); _embedx_sgd_rule->init_value( - value + common_feature_value.embedx_w_index(), + value + common_feature_value.Embedx_W_Index(), value + common_feature_value.embedx_g2sum_index(), false); } return 0; } -bool CtrCommonAccessor::need_extend_mf(float* value) { - float show = value[common_feature_value.show_index()]; - float click = value[common_feature_value.click_index()]; +bool CtrCommonAccessor::NeedExtendMF(float* value) { + float show = value[common_feature_value.ShowIndex()]; + float click = value[common_feature_value.ClickIndex()]; float score = (show - click) * _config.ctr_accessor_param().nonclk_coeff() + click * _config.ctr_accessor_param().click_coeff(); return score >= _config.embedx_threshold(); } -bool CtrCommonAccessor::has_mf(size_t size) { +bool CtrCommonAccessor::HasMF(size_t size) { return size > common_feature_value.embedx_g2sum_index(); } // from CommonFeatureValue to CtrCommonPullValue -int32_t CtrCommonAccessor::select(float** select_values, const float** values, +int32_t CtrCommonAccessor::Select(float** select_values, const float** values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* select_value = select_values[value_item]; const float* value = values[value_item]; - select_value[CtrCommonPullValue::show_index()] = - value[common_feature_value.show_index()]; - select_value[CtrCommonPullValue::click_index()] = - value[common_feature_value.click_index()]; - select_value[CtrCommonPullValue::embed_w_index()] = - value[common_feature_value.embed_w_index()]; - memcpy(select_value + CtrCommonPullValue::embedx_w_index(), - value + common_feature_value.embedx_w_index(), + select_value[CtrCommonPullValue::ShowIndex()] = + value[common_feature_value.ShowIndex()]; + select_value[CtrCommonPullValue::ClickIndex()] = + value[common_feature_value.ClickIndex()]; + select_value[CtrCommonPullValue::Embed_W_Index()] = + value[common_feature_value.Embed_W_Index()]; + memcpy(select_value + CtrCommonPullValue::Embedx_W_Index(), + value + common_feature_value.Embedx_W_Index(), embedx_dim * sizeof(float)); } return 0; @@ -252,16 +251,16 @@ int32_t CtrCommonAccessor::select(float** select_values, const float** values, // from 
CtrCommonPushValue to CtrCommonPushValue // first dim: item // second dim: field num -int32_t CtrCommonAccessor::merge(float** update_values, +int32_t CtrCommonAccessor::Merge(float** update_values, const float** other_update_values, size_t num) { auto embedx_dim = _config.embedx_dim(); - size_t total_dim = CtrCommonPushValue::dim(embedx_dim); + size_t total_dim = CtrCommonPushValue::Dim(embedx_dim); for (size_t value_item = 0; value_item < num; ++value_item) { float* update_value = update_values[value_item]; const float* other_update_value = other_update_values[value_item]; for (auto i = 0u; i < total_dim; ++i) { - if (i != CtrCommonPushValue::slot_index()) { + if (i != CtrCommonPushValue::SlotIndex()) { update_value[i] += other_update_value[i]; } } @@ -272,43 +271,43 @@ int32_t CtrCommonAccessor::merge(float** update_values, // from CtrCommonPushValue to CommonFeatureValue // first dim: item // second dim: field num -int32_t CtrCommonAccessor::update(float** update_values, +int32_t CtrCommonAccessor::Update(float** update_values, const float** push_values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* update_value = update_values[value_item]; const float* push_value = push_values[value_item]; - float push_show = push_value[CtrCommonPushValue::show_index()]; - float push_click = push_value[CtrCommonPushValue::click_index()]; - float slot = push_value[CtrCommonPushValue::slot_index()]; - update_value[common_feature_value.show_index()] += push_show; - update_value[common_feature_value.click_index()] += push_click; - update_value[common_feature_value.slot_index()] = slot; + float push_show = push_value[CtrCommonPushValue::ShowIndex()]; + float push_click = push_value[CtrCommonPushValue::ClickIndex()]; + float slot = push_value[CtrCommonPushValue::SlotIndex()]; + update_value[common_feature_value.ShowIndex()] += push_show; + update_value[common_feature_value.ClickIndex()] += push_click; + update_value[common_feature_value.SlotIndex()] = slot; update_value[common_feature_value.delta_score_index()] += (push_show - push_click) * _config.ctr_accessor_param().nonclk_coeff() + push_click * _config.ctr_accessor_param().click_coeff(); update_value[common_feature_value.unseen_days_index()] = 0; _embed_sgd_rule->update_value( - update_value + common_feature_value.embed_w_index(), + update_value + common_feature_value.Embed_W_Index(), update_value + common_feature_value.embed_g2sum_index(), - push_value + CtrCommonPushValue::embed_g_index()); + push_value + CtrCommonPushValue::Embed_G_Index()); _embedx_sgd_rule->update_value( - update_value + common_feature_value.embedx_w_index(), + update_value + common_feature_value.Embedx_W_Index(), update_value + common_feature_value.embedx_g2sum_index(), - push_value + CtrCommonPushValue::embedx_g_index()); + push_value + CtrCommonPushValue::Embedx_G_Index()); } return 0; } -bool CtrCommonAccessor::create_value(int stage, const float* value) { +bool CtrCommonAccessor::CreateValue(int stage, const float* value) { // stage == 0, pull // stage == 1, push if (stage == 0) { return true; } else if (stage == 1) { // operation - auto show = CtrCommonPushValue::show(const_cast(value)); - auto click = CtrCommonPushValue::click(const_cast(value)); + auto show = CtrCommonPushValue::Show(const_cast(value)); + auto click = CtrCommonPushValue::Click(const_cast(value)); auto score = show_click_score(show, click); if (score <= 0) { return false; @@ -329,34 +328,34 @@ float 
CtrCommonAccessor::show_click_score(float show, float click) { return (show - click) * nonclk_coeff + click * click_coeff; } -std::string CtrCommonAccessor::parse_to_string(const float* v, int param) { +std::string CtrCommonAccessor::ParseToString(const float* v, int param) { thread_local std::ostringstream os; os.clear(); os.str(""); os << v[0] << " " << v[1] << " " << v[2] << " " << v[3] << " " << v[4] << " " << v[5]; for (int i = common_feature_value.embed_g2sum_index(); - i < common_feature_value.embedx_w_index(); i++) { + i < common_feature_value.Embedx_W_Index(); i++) { os << " " << v[i]; } - auto show = common_feature_value.show(const_cast(v)); - auto click = common_feature_value.click(const_cast(v)); + auto show = common_feature_value.Show(const_cast(v)); + auto click = common_feature_value.Click(const_cast(v)); auto score = show_click_score(show, click); if (score >= _config.embedx_threshold() && - param > common_feature_value.embedx_w_index()) { - for (auto i = common_feature_value.embedx_w_index(); - i < common_feature_value.dim(); ++i) { + param > common_feature_value.Embedx_W_Index()) { + for (auto i = common_feature_value.Embedx_W_Index(); + i < common_feature_value.Dim(); ++i) { os << " " << v[i]; } } return os.str(); } -int CtrCommonAccessor::parse_from_string(const std::string& str, float* value) { +int CtrCommonAccessor::ParseFromString(const std::string& str, float* value) { int embedx_dim = _config.embedx_dim(); _embedx_sgd_rule->init_value( - value + common_feature_value.embedx_w_index(), + value + common_feature_value.Embedx_W_Index(), value + common_feature_value.embedx_g2sum_index()); auto ret = paddle::string::str_to_float(str.data(), value); CHECK(ret >= 6) << "expect more than 6 real:" << ret; diff --git a/paddle/fluid/distributed/ps/table/ctr_accessor.h b/paddle/fluid/distributed/ps/table/ctr_accessor.h index a2121b21d9fe6f..21dfc6a5c1c385 100644 --- a/paddle/fluid/distributed/ps/table/ctr_accessor.h +++ b/paddle/fluid/distributed/ps/table/ctr_accessor.h @@ -40,27 +40,27 @@ class CtrCommonAccessor : public ValueAccessor { std::float embedx_g2sum; */ - int dim() { return 6 + embed_sgd_dim + embedx_sgd_dim + embedx_dim; } - int dim_size(size_t dim, int embedx_dim) { return sizeof(float); } - int size() { return dim() * sizeof(float); } - int slot_index() { return 0; } - int unseen_days_index() { return slot_index() + 1; } + int Dim() { return 6 + embed_sgd_dim + embedx_sgd_dim + embedx_dim; } + int DimSize(size_t dim, int embedx_dim) { return sizeof(float); } + int Size() { return Dim() * sizeof(float); } + int SlotIndex() { return 0; } + int unseen_days_index() { return SlotIndex() + 1; } int delta_score_index() { return unseen_days_index() + 1; } - int show_index() { return delta_score_index() + 1; } - int click_index() { return show_index() + 1; } - int embed_w_index() { return click_index() + 1; } - int embed_g2sum_index() { return embed_w_index() + 1; } - int embedx_w_index() { return embed_g2sum_index() + embed_sgd_dim; } - int embedx_g2sum_index() { return embedx_w_index() + embedx_dim; } + int ShowIndex() { return delta_score_index() + 1; } + int ClickIndex() { return ShowIndex() + 1; } + int Embed_W_Index() { return ClickIndex() + 1; } + int embed_g2sum_index() { return Embed_W_Index() + 1; } + int Embedx_W_Index() { return embed_g2sum_index() + embed_sgd_dim; } + int embedx_g2sum_index() { return Embedx_W_Index() + embedx_dim; } float& unseen_days(float* val) { return val[unseen_days_index()]; } float& delta_score(float* val) { return 
val[delta_score_index()]; } - float& show(float* val) { return val[show_index()]; } - float& click(float* val) { return val[click_index()]; } - float& slot(float* val) { return val[slot_index()]; } - float& embed_w(float* val) { return val[embed_w_index()]; } + float& Show(float* val) { return val[ShowIndex()]; } + float& Click(float* val) { return val[ClickIndex()]; } + float& Slot(float* val) { return val[SlotIndex()]; } + float& EmbedW(float* val) { return val[Embed_W_Index()]; } float& embed_g2sum(float* val) { return val[embed_g2sum_index()]; } - float& embedx_w(float* val) { return val[embedx_w_index()]; } + float& EmbedxW(float* val) { return val[Embedx_W_Index()]; } float& embedx_g2sum(float* val) { return val[embedx_g2sum_index()]; } int embed_sgd_dim; @@ -77,31 +77,31 @@ class CtrCommonAccessor : public ValueAccessor { std::vector embedx_g; */ - static int dim(int embedx_dim) { return 4 + embedx_dim; } + static int Dim(int embedx_dim) { return 4 + embedx_dim; } - static int dim_size(int dim, int embedx_dim) { return sizeof(float); } - static int size(int embedx_dim) { return dim(embedx_dim) * sizeof(float); } - static int slot_index() { return 0; } - static int show_index() { return CtrCommonPushValue::slot_index() + 1; } - static int click_index() { return CtrCommonPushValue::show_index() + 1; } - static int embed_g_index() { return CtrCommonPushValue::click_index() + 1; } - static int embedx_g_index() { - return CtrCommonPushValue::embed_g_index() + 1; + static int DimSize(int dim, int embedx_dim) { return sizeof(float); } + static int Size(int embedx_dim) { return Dim(embedx_dim) * sizeof(float); } + static int SlotIndex() { return 0; } + static int ShowIndex() { return CtrCommonPushValue::SlotIndex() + 1; } + static int ClickIndex() { return CtrCommonPushValue::ShowIndex() + 1; } + static int Embed_G_Index() { return CtrCommonPushValue::ClickIndex() + 1; } + static int Embedx_G_Index() { + return CtrCommonPushValue::Embed_G_Index() + 1; } - static float& slot(float* val) { - return val[CtrCommonPushValue::slot_index()]; + static float& Slot(float* val) { + return val[CtrCommonPushValue::SlotIndex()]; } - static float& show(float* val) { - return val[CtrCommonPushValue::show_index()]; + static float& Show(float* val) { + return val[CtrCommonPushValue::ShowIndex()]; } - static float& click(float* val) { - return val[CtrCommonPushValue::click_index()]; + static float& Click(float* val) { + return val[CtrCommonPushValue::ClickIndex()]; } - static float& embed_g(float* val) { - return val[CtrCommonPushValue::embed_g_index()]; + static float& EmbedG(float* val) { + return val[CtrCommonPushValue::Embed_G_Index()]; } - static float* embedx_g(float* val) { - return val + CtrCommonPushValue::embedx_g_index(); + static float* EmbedxG(float* val) { + return val + CtrCommonPushValue::Embedx_G_Index(); } }; @@ -113,90 +113,90 @@ class CtrCommonAccessor : public ValueAccessor { std::vector embedx_w; */ - static int dim(int embedx_dim) { return 3 + embedx_dim; } - static int dim_size(size_t dim) { return sizeof(float); } - static int size(int embedx_dim) { return dim(embedx_dim) * sizeof(float); } - static int show_index() { return 0; } - static int click_index() { return 1; } - static int embed_w_index() { return 2; } - static int embedx_w_index() { return 3; } - static float& show(float* val) { - return val[CtrCommonPullValue::show_index()]; + static int Dim(int embedx_dim) { return 3 + embedx_dim; } + static int DimSize(size_t dim) { return sizeof(float); } + static int Size(int 
embedx_dim) { return Dim(embedx_dim) * sizeof(float); } + static int ShowIndex() { return 0; } + static int ClickIndex() { return 1; } + static int Embed_W_Index() { return 2; } + static int Embedx_W_Index() { return 3; } + static float& Show(float* val) { + return val[CtrCommonPullValue::ShowIndex()]; } - static float& click(float* val) { - return val[CtrCommonPullValue::click_index()]; + static float& Click(float* val) { + return val[CtrCommonPullValue::ClickIndex()]; } - static float& embed_w(float* val) { - return val[CtrCommonPullValue::embed_w_index()]; + static float& EmbedW(float* val) { + return val[CtrCommonPullValue::Embed_W_Index()]; } - static float* embedx_w(float* val) { - return val + CtrCommonPullValue::embedx_w_index(); + static float* EmbedxW(float* val) { + return val + CtrCommonPullValue::Embedx_W_Index(); } }; CtrCommonAccessor() {} - virtual int initialize(); + virtual int Initialize(); virtual ~CtrCommonAccessor() {} virtual void SetTableInfo(AccessorInfo& info); virtual size_t GetTableInfo(InfoKey key); // value维度 - virtual size_t dim(); + size_t Dim(); // value各个维度的size - virtual size_t dim_size(size_t dim); + size_t DimSize(size_t dim); // value各维度相加总size - virtual size_t size(); + size_t Size(); // value中mf动态长度部分总size大小, sparse下生效 - virtual size_t mf_size(); + size_t MFSize(); // pull value维度 - virtual size_t select_dim(); + size_t SelectDim(); // pull value各个维度的size - virtual size_t select_dim_size(size_t dim); + size_t SelectDimSize(size_t dim); // pull value各维度相加总size - virtual size_t select_size(); + size_t SelectSize(); // push value维度 - virtual size_t update_dim(); + size_t UpdateDim(); // push value各个维度的size - virtual size_t update_dim_size(size_t dim); + size_t UpdateDimSize(size_t dim); // push value各维度相加总size - virtual size_t update_size(); + size_t UpdateSize(); // 判断该value是否进行shrink - virtual bool shrink(float* value); + virtual bool Shrink(float* value); // 判断该value是否保存到ssd // virtual bool save_ssd(float* value); - virtual bool need_extend_mf(float* value); - virtual bool has_mf(size_t size); + virtual bool NeedExtendMF(float* value); + virtual bool HasMF(size_t size); // 判断该value是否在save阶段dump, // param作为参数用于标识save阶段,如downpour的xbox与batch_model // param = 0, save all feature // param = 1, save delta feature // param = 2, save xbox base feature - bool save(float* value, int param) override; + bool Save(float* value, int param) override; // update delta_score and unseen_days after save - void update_stat_after_save(float* value, int param) override; + void UpdateStatAfterSave(float* value, int param) override; // keys不存在时,为values生成随机值 // 要求value的内存由外部调用者分配完毕 - virtual int32_t create(float** value, size_t num); + virtual int32_t Create(float** value, size_t num); // 从values中选取到select_values中 - virtual int32_t select(float** select_values, const float** values, + virtual int32_t Select(float** select_values, const float** values, size_t num); // 将update_values聚合到一起 - virtual int32_t merge(float** update_values, + virtual int32_t Merge(float** update_values, const float** other_update_values, size_t num); // 将update_values聚合到一起,通过it.next判定是否进入下一个key - // virtual int32_t merge(float** update_values, iterator it); + // virtual int32_t Merge(float** update_values, iterator it); // 将update_values更新应用到values中 - virtual int32_t update(float** values, const float** update_values, + virtual int32_t Update(float** values, const float** update_values, size_t num); - std::string parse_to_string(const float* value, int param) override; - int32_t 
parse_from_string(const std::string& str, float* v) override; - virtual bool create_value(int type, const float* value); + std::string ParseToString(const float* value, int param) override; + int32_t ParseFromString(const std::string& str, float* v) override; + virtual bool CreateValue(int type, const float* value); // 这个接口目前只用来取show - float get_field(float* value, const std::string& name) override { + float GetField(float* value, const std::string& name) override { // CHECK(name == "show"); if (name == "show") { - return common_feature_value.show(value); + return common_feature_value.Show(value); } return 0.0; } diff --git a/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc b/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc index 0e3df6e82521de..ed21a6dac317ec 100644 --- a/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc +++ b/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc @@ -20,7 +20,7 @@ namespace paddle { namespace distributed { -int DownpourCtrDoubleAccessor::initialize() { +int DownpourCtrDoubleAccessor::Initialize() { auto name = _config.embed_sgd_param().name(); _embed_sgd_rule = CREATE_PSCORE_CLASS(SparseValueSGDRule, name); _embed_sgd_rule->load_config(_config.embed_sgd_param(), 1); @@ -38,76 +38,75 @@ int DownpourCtrDoubleAccessor::initialize() { } void DownpourCtrDoubleAccessor::SetTableInfo(AccessorInfo& info) { - info.dim = dim(); - info.size = size(); - info.select_dim = select_dim(); - info.select_size = select_size(); - info.update_dim = update_dim(); - info.update_size = update_size(); - info.mf_size = mf_size(); - info.fea_dim = fea_dim(); + info.dim = Dim(); + info.size = Size(); + info.select_dim = SelectDim(); + info.select_size = SelectSize(); + info.update_dim = UpdateDim(); + info.update_size = UpdateSize(); + info.mf_size = MFSize(); } size_t DownpourCtrDoubleAccessor::GetTableInfo(InfoKey key) { switch (key) { case DIM: - return dim(); + return Dim(); case SIZE: - return size(); + return Size(); case SELECT_DIM: - return select_dim(); + return SelectDim(); case SELECT_SIZE: - return select_size(); + return SelectSize(); case UPDATE_DIM: - return update_dim(); + return UpdateDim(); case UPDATE_SIZE: - return update_size(); + return UpdateSize(); case MF_SIZE: - return mf_size(); - case FEA_DIM: - return fea_dim(); + return MFSize(); + default: + return 0; } return 0; } -size_t DownpourCtrDoubleAccessor::dim() { +size_t DownpourCtrDoubleAccessor::Dim() { auto embedx_dim = _config.embedx_dim(); - return DownpourCtrDoubleFeatureValue::dim(embedx_dim); + return DownpourCtrDoubleFeatureValue::Dim(embedx_dim); } -size_t DownpourCtrDoubleAccessor::dim_size(size_t dim) { +size_t DownpourCtrDoubleAccessor::DimSize(size_t dim) { auto embedx_dim = _config.embedx_dim(); - return DownpourCtrDoubleFeatureValue::dim_size(dim, embedx_dim); + return DownpourCtrDoubleFeatureValue::DimSize(dim, embedx_dim); } -size_t DownpourCtrDoubleAccessor::size() { +size_t DownpourCtrDoubleAccessor::Size() { auto embedx_dim = _config.embedx_dim(); - return DownpourCtrDoubleFeatureValue::size(embedx_dim); + return DownpourCtrDoubleFeatureValue::Size(embedx_dim); } -size_t DownpourCtrDoubleAccessor::mf_size() { +size_t DownpourCtrDoubleAccessor::MFSize() { return (_config.embedx_dim() + 1) * sizeof(float); // embedx embedx_g2sum } // pull value -size_t DownpourCtrDoubleAccessor::select_dim() { +size_t DownpourCtrDoubleAccessor::SelectDim() { auto embedx_dim = _config.embedx_dim(); return 3 + embedx_dim; } -size_t DownpourCtrDoubleAccessor::select_dim_size(size_t 
dim) { +size_t DownpourCtrDoubleAccessor::SelectDimSize(size_t dim) { return sizeof(float); } -size_t DownpourCtrDoubleAccessor::select_size() { - return select_dim() * sizeof(float); +size_t DownpourCtrDoubleAccessor::SelectSize() { + return SelectDim() * sizeof(float); } // push value -size_t DownpourCtrDoubleAccessor::update_dim() { +size_t DownpourCtrDoubleAccessor::UpdateDim() { auto embedx_dim = _config.embedx_dim(); return 4 + embedx_dim; } -size_t DownpourCtrDoubleAccessor::update_dim_size(size_t dim) { +size_t DownpourCtrDoubleAccessor::UpdateDimSize(size_t dim) { return sizeof(float); } -size_t DownpourCtrDoubleAccessor::update_size() { - return update_dim() * sizeof(float); +size_t DownpourCtrDoubleAccessor::UpdateSize() { + return UpdateDim() * sizeof(float); } -bool DownpourCtrDoubleAccessor::shrink(float* value) { +bool DownpourCtrDoubleAccessor::Shrink(float* value) { // auto base_threshold = _config.ctr_accessor_param().base_threshold(); // auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); // auto delete_threshold = _config.ctr_accessor_param().delete_threshold(); @@ -117,11 +116,11 @@ bool DownpourCtrDoubleAccessor::shrink(float* value) { _config.ctr_accessor_param().delete_after_unseen_days(); auto delete_threshold = _config.ctr_accessor_param().delete_threshold(); // time_decay first - DownpourCtrDoubleFeatureValue::show(value) *= _show_click_decay_rate; - DownpourCtrDoubleFeatureValue::click(value) *= _show_click_decay_rate; + DownpourCtrDoubleFeatureValue::Show(value) *= _show_click_decay_rate; + DownpourCtrDoubleFeatureValue::Click(value) *= _show_click_decay_rate; // shrink after - auto score = show_click_score(DownpourCtrDoubleFeatureValue::show(value), - DownpourCtrDoubleFeatureValue::click(value)); + auto score = show_click_score(DownpourCtrDoubleFeatureValue::Show(value), + DownpourCtrDoubleFeatureValue::Click(value)); auto unseen_days = DownpourCtrDoubleFeatureValue::unseen_days(value); if (score < delete_threshold || unseen_days > delete_after_unseen_days) { return true; @@ -139,16 +138,16 @@ bool DownpourCtrDoubleAccessor::save_ssd(float* value) { // float* value, int param, double global_cache_threshold) { // auto base_threshold = _config.ctr_accessor_param().base_threshold(); // auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); -// if (show_click_score(DownpourCtrDoubleFeatureValue::show(value), -// DownpourCtrDoubleFeatureValue::click(value)) >= base_threshold +// if (show_click_score(DownpourCtrDoubleFeatureValue::Show(value), +// DownpourCtrDoubleFeatureValue::Click(value)) >= base_threshold // && DownpourCtrDoubleFeatureValue::unseen_days(value) <= // delta_keep_days) { -// return DownpourCtrDoubleFeatureValue::show(value) > +// return DownpourCtrDoubleFeatureValue::Show(value) > // global_cache_threshold; // } // return false; // } -bool DownpourCtrDoubleAccessor::save(float* value, int param) { +bool DownpourCtrDoubleAccessor::Save(float* value, int param) { // auto base_threshold = _config.ctr_accessor_param().base_threshold(); // auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); // auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); @@ -167,8 +166,8 @@ bool DownpourCtrDoubleAccessor::save(float* value, int param) { case 1: // save xbox base case 2: { - if (show_click_score(DownpourCtrDoubleFeatureValue::show(value), - DownpourCtrDoubleFeatureValue::click(value)) >= + if (show_click_score(DownpourCtrDoubleFeatureValue::Show(value), + 
DownpourCtrDoubleFeatureValue::Click(value)) >= base_threshold && DownpourCtrDoubleFeatureValue::delta_score(value) >= delta_threshold && @@ -185,8 +184,8 @@ bool DownpourCtrDoubleAccessor::save(float* value, int param) { } // already decayed in shrink case 3: { - // DownpourCtrFeatureValue::show(value) *= _show_click_decay_rate; - // DownpourCtrFeatureValue::click(value) *= _show_click_decay_rate; + // DownpourCtrFeatureValue::Show(value) *= _show_click_decay_rate; + // DownpourCtrFeatureValue::Click(value) *= _show_click_decay_rate; // do this after save, because it must not be modified when retry // DownpourCtrDoubleFeatureValue::unseen_days(value)++; return true; @@ -196,8 +195,7 @@ bool DownpourCtrDoubleAccessor::save(float* value, int param) { }; } -void DownpourCtrDoubleAccessor::update_stat_after_save(float* value, - int param) { +void DownpourCtrDoubleAccessor::UpdateStatAfterSave(float* value, int param) { auto base_threshold = _config.ctr_accessor_param().base_threshold(); auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); @@ -206,8 +204,8 @@ void DownpourCtrDoubleAccessor::update_stat_after_save(float* value, } switch (param) { case 1: { - if (show_click_score(DownpourCtrDoubleFeatureValue::show(value), - DownpourCtrDoubleFeatureValue::click(value)) >= + if (show_click_score(DownpourCtrDoubleFeatureValue::Show(value), + DownpourCtrDoubleFeatureValue::Click(value)) >= base_threshold && DownpourCtrDoubleFeatureValue::delta_score(value) >= delta_threshold && @@ -226,29 +224,29 @@ void DownpourCtrDoubleAccessor::update_stat_after_save(float* value, }; } -int32_t DownpourCtrDoubleAccessor::create(float** values, size_t num) { +int32_t DownpourCtrDoubleAccessor::Create(float** values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* value = values[value_item]; value[DownpourCtrDoubleFeatureValue::unseen_days_index()] = 0; value[DownpourCtrDoubleFeatureValue::delta_score_index()] = 0; - *(double*)(value + DownpourCtrDoubleFeatureValue::show_index()) = 0; - *(double*)(value + DownpourCtrDoubleFeatureValue::click_index()) = 0; - value[DownpourCtrDoubleFeatureValue::slot_index()] = -1; + *(double*)(value + DownpourCtrDoubleFeatureValue::ShowIndex()) = 0; + *(double*)(value + DownpourCtrDoubleFeatureValue::ClickIndex()) = 0; + value[DownpourCtrDoubleFeatureValue::SlotIndex()] = -1; _embed_sgd_rule->init_value( - value + DownpourCtrDoubleFeatureValue::embed_w_index(), + value + DownpourCtrDoubleFeatureValue::Embed_W_Index(), value + DownpourCtrDoubleFeatureValue::embed_g2sum_index()); _embedx_sgd_rule->init_value( - value + DownpourCtrDoubleFeatureValue::embedx_w_index(), + value + DownpourCtrDoubleFeatureValue::Embedx_W_Index(), value + DownpourCtrDoubleFeatureValue::embedx_g2sum_index(), false); } return 0; } -bool DownpourCtrDoubleAccessor::need_extend_mf(float* value) { +bool DownpourCtrDoubleAccessor::NeedExtendMF(float* value) { auto show = - ((double*)(value + DownpourCtrDoubleFeatureValue::show_index()))[0]; + ((double*)(value + DownpourCtrDoubleFeatureValue::ShowIndex()))[0]; auto click = - ((double*)(value + DownpourCtrDoubleFeatureValue::click_index()))[0]; + ((double*)(value + DownpourCtrDoubleFeatureValue::ClickIndex()))[0]; // float score = (show - click) * _config.ctr_accessor_param().nonclk_coeff() auto score = (show - click) * _config.ctr_accessor_param().nonclk_coeff() + click * 
_config.ctr_accessor_param().click_coeff(); @@ -256,20 +254,20 @@ bool DownpourCtrDoubleAccessor::need_extend_mf(float* value) { return score >= _config.embedx_threshold(); } // from DownpourCtrFeatureValue to DownpourCtrPullValue -int32_t DownpourCtrDoubleAccessor::select(float** select_values, +int32_t DownpourCtrDoubleAccessor::Select(float** select_values, const float** values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* select_value = select_values[value_item]; float* value = const_cast(values[value_item]); - select_value[DownpourCtrDoublePullValue::show_index()] = - (float)*(double*)(value + DownpourCtrDoubleFeatureValue::show_index()); - select_value[DownpourCtrDoublePullValue::click_index()] = - (float)*(double*)(value + DownpourCtrDoubleFeatureValue::click_index()); - select_value[DownpourCtrDoublePullValue::embed_w_index()] = - value[DownpourCtrDoubleFeatureValue::embed_w_index()]; - memcpy(select_value + DownpourCtrDoublePullValue::embedx_w_index(), - value + DownpourCtrDoubleFeatureValue::embedx_w_index(), + select_value[DownpourCtrDoublePullValue::ShowIndex()] = + (float)*(double*)(value + DownpourCtrDoubleFeatureValue::ShowIndex()); + select_value[DownpourCtrDoublePullValue::ClickIndex()] = + (float)*(double*)(value + DownpourCtrDoubleFeatureValue::ClickIndex()); + select_value[DownpourCtrDoublePullValue::Embed_W_Index()] = + value[DownpourCtrDoubleFeatureValue::Embed_W_Index()]; + memcpy(select_value + DownpourCtrDoublePullValue::Embedx_W_Index(), + value + DownpourCtrDoubleFeatureValue::Embedx_W_Index(), embedx_dim * sizeof(float)); } return 0; @@ -277,23 +275,23 @@ int32_t DownpourCtrDoubleAccessor::select(float** select_values, // from DownpourCtrPushValue to DownpourCtrPushValue // first dim: item // second dim: field num -int32_t DownpourCtrDoubleAccessor::merge(float** update_values, +int32_t DownpourCtrDoubleAccessor::Merge(float** update_values, const float** other_update_values, size_t num) { auto embedx_dim = _config.embedx_dim(); - size_t total_dim = DownpourCtrDoublePushValue::dim(embedx_dim); + size_t total_dim = DownpourCtrDoublePushValue::Dim(embedx_dim); for (size_t value_item = 0; value_item < num; ++value_item) { float* update_value = update_values[value_item]; const float* other_update_value = other_update_values[value_item]; - /**(double*)(update_value + DownpourCtrDoublePushValue::show_index()) += - *(double*)(other_update_value + DownpourCtrDoublePushValue::show_index()); - *(double*)(update_value + DownpourCtrDoublePushValue::click_index()) += - *(double*)(other_update_value + DownpourCtrDoublePushValue::click_index()); + /**(double*)(update_value + DownpourCtrDoublePushValue::ShowIndex()) += + *(double*)(other_update_value + DownpourCtrDoublePushValue::ShowIndex()); + *(double*)(update_value + DownpourCtrDoublePushValue::ClickIndex()) += + *(double*)(other_update_value + DownpourCtrDoublePushValue::ClickIndex()); for (auto i = 3u; i < total_dim; ++i) { update_value[i] += other_update_value[i]; }*/ for (auto i = 0u; i < total_dim; ++i) { - if (i != DownpourCtrDoublePushValue::slot_index()) { + if (i != DownpourCtrDoublePushValue::SlotIndex()) { update_value[i] += other_update_value[i]; } } @@ -303,21 +301,21 @@ int32_t DownpourCtrDoubleAccessor::merge(float** update_values, // from DownpourCtrPushValue to DownpourCtrFeatureValue // first dim: item // second dim: field num -int32_t DownpourCtrDoubleAccessor::update(float** update_values, +int32_t 
DownpourCtrDoubleAccessor::Update(float** update_values, const float** push_values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* update_value = update_values[value_item]; const float* push_value = push_values[value_item]; - float push_show = push_value[DownpourCtrDoublePushValue::show_index()]; - float push_click = push_value[DownpourCtrDoublePushValue::click_index()]; - float slot = push_value[DownpourCtrDoublePushValue::slot_index()]; - *(double*)(update_value + DownpourCtrDoubleFeatureValue::show_index()) += + float push_show = push_value[DownpourCtrDoublePushValue::ShowIndex()]; + float push_click = push_value[DownpourCtrDoublePushValue::ClickIndex()]; + float slot = push_value[DownpourCtrDoublePushValue::SlotIndex()]; + *(double*)(update_value + DownpourCtrDoubleFeatureValue::ShowIndex()) += (double)push_show; - *(double*)(update_value + DownpourCtrDoubleFeatureValue::click_index()) += + *(double*)(update_value + DownpourCtrDoubleFeatureValue::ClickIndex()) += (double)push_click; - update_value[DownpourCtrDoubleFeatureValue::slot_index()] = slot; + update_value[DownpourCtrDoubleFeatureValue::SlotIndex()] = slot; update_value[DownpourCtrDoubleFeatureValue::delta_score_index()] += (push_show - push_click) * _config.ctr_accessor_param().nonclk_coeff() + push_click * _config.ctr_accessor_param().click_coeff(); @@ -325,24 +323,24 @@ int32_t DownpourCtrDoubleAccessor::update(float** update_values, // push_click * _config.ctr_accessor_param().click_coeff(); update_value[DownpourCtrDoubleFeatureValue::unseen_days_index()] = 0; _embed_sgd_rule->update_value( - update_value + DownpourCtrDoubleFeatureValue::embed_w_index(), + update_value + DownpourCtrDoubleFeatureValue::Embed_W_Index(), update_value + DownpourCtrDoubleFeatureValue::embed_g2sum_index(), - push_value + DownpourCtrDoublePushValue::embed_g_index(), push_show); + push_value + DownpourCtrDoublePushValue::Embed_G_Index(), push_show); _embedx_sgd_rule->update_value( - update_value + DownpourCtrDoubleFeatureValue::embedx_w_index(), + update_value + DownpourCtrDoubleFeatureValue::Embedx_W_Index(), update_value + DownpourCtrDoubleFeatureValue::embedx_g2sum_index(), - push_value + DownpourCtrDoublePushValue::embedx_g_index(), push_show); + push_value + DownpourCtrDoublePushValue::Embedx_G_Index(), push_show); } return 0; } -bool DownpourCtrDoubleAccessor::create_value(int stage, const float* value) { +bool DownpourCtrDoubleAccessor::CreateValue(int stage, const float* value) { // stage == 0, pull // stage == 1, push if (stage == 0) { return true; } else if (stage == 1) { - auto show = DownpourCtrDoublePushValue::show(const_cast(value)); - auto click = DownpourCtrDoublePushValue::click(const_cast(value)); + auto show = DownpourCtrDoublePushValue::Show(const_cast(value)); + auto click = DownpourCtrDoublePushValue::Click(const_cast(value)); auto score = show_click_score(show, click); if (score <= 0) { return false; @@ -363,16 +361,16 @@ double DownpourCtrDoubleAccessor::show_click_score(double show, double click) { auto click_coeff = _config.ctr_accessor_param().click_coeff(); return (show - click) * nonclk_coeff + click * click_coeff; } -std::string DownpourCtrDoubleAccessor::parse_to_string(const float* v, - int param_size) { +std::string DownpourCtrDoubleAccessor::ParseToString(const float* v, + int param_size) { thread_local std::ostringstream os; os.clear(); os.str(""); os << v[0] << " " << v[1] << " " << (float)((double*)(v + 2))[0] << " " << 
(float)((double*)(v + 4))[0] << " " << v[6] << " " << v[7] << " " << v[8]; - auto show = DownpourCtrDoubleFeatureValue::show(const_cast(v)); - auto click = DownpourCtrDoubleFeatureValue::click(const_cast(v)); + auto show = DownpourCtrDoubleFeatureValue::Show(const_cast(v)); + auto click = DownpourCtrDoubleFeatureValue::Click(const_cast(v)); auto score = show_click_score(show, click); if (score >= _config.embedx_threshold() && param_size > 9) { os << " " << v[9]; @@ -382,23 +380,23 @@ std::string DownpourCtrDoubleAccessor::parse_to_string(const float* v, } return os.str(); } -int DownpourCtrDoubleAccessor::parse_from_string(const std::string& str, - float* value) { +int DownpourCtrDoubleAccessor::ParseFromString(const std::string& str, + float* value) { int embedx_dim = _config.embedx_dim(); - float data_buff[dim() + 2]; + float data_buff[Dim() + 2]; float* data_buff_ptr = data_buff; _embedx_sgd_rule->init_value( - data_buff_ptr + DownpourCtrDoubleFeatureValue::embedx_w_index(), + data_buff_ptr + DownpourCtrDoubleFeatureValue::Embedx_W_Index(), data_buff_ptr + DownpourCtrDoubleFeatureValue::embedx_g2sum_index()); auto str_len = paddle::string::str_to_float(str.data(), data_buff_ptr); CHECK(str_len >= 6) << "expect more than 6 real:" << str_len; - int show_index = DownpourCtrDoubleFeatureValue::show_index(); - int click_index = DownpourCtrDoubleFeatureValue::click_index(); - int embed_w_index = DownpourCtrDoubleFeatureValue::embed_w_index(); + int show_index = DownpourCtrDoubleFeatureValue::ShowIndex(); + int click_index = DownpourCtrDoubleFeatureValue::ClickIndex(); + int embed_w_index = DownpourCtrDoubleFeatureValue::Embed_W_Index(); // no slot, embedx - int value_dim = dim(); + int value_dim = Dim(); int embedx_g2sum_index = DownpourCtrDoubleFeatureValue::embedx_g2sum_index(); - value[DownpourCtrDoubleFeatureValue::slot_index()] = -1; + value[DownpourCtrDoubleFeatureValue::SlotIndex()] = -1; // other case if (str_len == (value_dim - 1)) { // copy unseen_days..delta_score @@ -407,7 +405,7 @@ int DownpourCtrDoubleAccessor::parse_from_string(const std::string& str, *(double*)(value + show_index) = (double)data_buff_ptr[2]; *(double*)(value + click_index) = (double)data_buff_ptr[3]; // copy others - value[DownpourCtrDoubleFeatureValue::embed_w_index()] = data_buff_ptr[4]; + value[DownpourCtrDoubleFeatureValue::Embed_W_Index()] = data_buff_ptr[4]; value[DownpourCtrDoubleFeatureValue::embed_g2sum_index()] = data_buff_ptr[5]; memcpy(value + embedx_g2sum_index, data_buff_ptr + 6, diff --git a/paddle/fluid/distributed/ps/table/ctr_double_accessor.h b/paddle/fluid/distributed/ps/table/ctr_double_accessor.h index fb8b27ecfd9854..29ddcbc86d7c78 100644 --- a/paddle/fluid/distributed/ps/table/ctr_double_accessor.h +++ b/paddle/fluid/distributed/ps/table/ctr_double_accessor.h @@ -38,36 +38,36 @@ class DownpourCtrDoubleAccessor : public ValueAccessor { float embedx_g2sum; std::vector embedx_w; */ - static int dim(int embedx_dim) { return 8 + embedx_dim; } - static int dim_size(size_t dim, int embedx_dim) { return sizeof(float); } - static int size(int embedx_dim) { - return (dim(embedx_dim) + 2) * sizeof(float); + static int Dim(int embedx_dim) { return 8 + embedx_dim; } + static int DimSize(size_t dim, int embedx_dim) { return sizeof(float); } + static int Size(int embedx_dim) { + return (Dim(embedx_dim) + 2) * sizeof(float); } static int unseen_days_index() { return 0; } static int delta_score_index() { return DownpourCtrDoubleFeatureValue::unseen_days_index() + 1; } - static int show_index() { + 
static int ShowIndex() { return DownpourCtrDoubleFeatureValue::delta_score_index() + 1; } // show is double - static int click_index() { - return DownpourCtrDoubleFeatureValue::show_index() + 2; + static int ClickIndex() { + return DownpourCtrDoubleFeatureValue::ShowIndex() + 2; } // click is double - static int embed_w_index() { - return DownpourCtrDoubleFeatureValue::click_index() + 2; + static int Embed_W_Index() { + return DownpourCtrDoubleFeatureValue::ClickIndex() + 2; } static int embed_g2sum_index() { - return DownpourCtrDoubleFeatureValue::embed_w_index() + 1; + return DownpourCtrDoubleFeatureValue::Embed_W_Index() + 1; } - static int slot_index() { + static int SlotIndex() { return DownpourCtrDoubleFeatureValue::embed_g2sum_index() + 1; } static int embedx_g2sum_index() { - return DownpourCtrDoubleFeatureValue::slot_index() + 1; + return DownpourCtrDoubleFeatureValue::SlotIndex() + 1; } - static int embedx_w_index() { + static int Embedx_W_Index() { return DownpourCtrDoubleFeatureValue::embedx_g2sum_index() + 1; } static float& unseen_days(float* val) { @@ -76,17 +76,17 @@ class DownpourCtrDoubleAccessor : public ValueAccessor { static float& delta_score(float* val) { return val[DownpourCtrDoubleFeatureValue::delta_score_index()]; } - static double& show(float* val) { - return ((double*)(val + DownpourCtrDoubleFeatureValue::show_index()))[0]; + static double& Show(float* val) { + return ((double*)(val + DownpourCtrDoubleFeatureValue::ShowIndex()))[0]; } - static double& click(float* val) { - return ((double*)(val + DownpourCtrDoubleFeatureValue::click_index()))[0]; + static double& Click(float* val) { + return ((double*)(val + DownpourCtrDoubleFeatureValue::ClickIndex()))[0]; } - static float& slot(float* val) { - return val[DownpourCtrDoubleFeatureValue::slot_index()]; + static float& Slot(float* val) { + return val[DownpourCtrDoubleFeatureValue::SlotIndex()]; } - static float& embed_w(float* val) { - return val[DownpourCtrDoubleFeatureValue::embed_w_index()]; + static float& EmbedW(float* val) { + return val[DownpourCtrDoubleFeatureValue::Embed_W_Index()]; } static float& embed_g2sum(float* val) { return val[DownpourCtrDoubleFeatureValue::embed_g2sum_index()]; @@ -94,8 +94,8 @@ class DownpourCtrDoubleAccessor : public ValueAccessor { static float& embedx_g2sum(float* val) { return val[DownpourCtrDoubleFeatureValue::embedx_g2sum_index()]; } - static float* embedx_w(float* val) { - return (val + DownpourCtrDoubleFeatureValue::embedx_w_index()); + static float* EmbedxW(float* val) { + return (val + DownpourCtrDoubleFeatureValue::Embedx_W_Index()); } }; struct DownpourCtrDoublePushValue { @@ -106,36 +106,36 @@ class DownpourCtrDoubleAccessor : public ValueAccessor { float embed_g; std::vector embedx_g; */ - static int dim(int embedx_dim) { return 4 + embedx_dim; } - static int dim_size(int dim, int embedx_dim) { return sizeof(float); } - static int size(int embedx_dim) { return dim(embedx_dim) * sizeof(float); } - static int slot_index() { return 0; } - static int show_index() { - return DownpourCtrDoublePushValue::slot_index() + 1; + static int Dim(int embedx_dim) { return 4 + embedx_dim; } + static int DimSize(int dim, int embedx_dim) { return sizeof(float); } + static int Size(int embedx_dim) { return Dim(embedx_dim) * sizeof(float); } + static int SlotIndex() { return 0; } + static int ShowIndex() { + return DownpourCtrDoublePushValue::SlotIndex() + 1; } - static int click_index() { - return DownpourCtrDoublePushValue::show_index() + 1; + static int ClickIndex() { + return 
DownpourCtrDoublePushValue::ShowIndex() + 1; } - static int embed_g_index() { - return DownpourCtrDoublePushValue::click_index() + 1; + static int Embed_G_Index() { + return DownpourCtrDoublePushValue::ClickIndex() + 1; } - static int embedx_g_index() { - return DownpourCtrDoublePushValue::embed_g_index() + 1; + static int Embedx_G_Index() { + return DownpourCtrDoublePushValue::Embed_G_Index() + 1; } - static float& slot(float* val) { - return val[DownpourCtrDoublePushValue::slot_index()]; + static float& Slot(float* val) { + return val[DownpourCtrDoublePushValue::SlotIndex()]; } - static float& show(float* val) { - return val[DownpourCtrDoublePushValue::show_index()]; + static float& Show(float* val) { + return val[DownpourCtrDoublePushValue::ShowIndex()]; } - static float& click(float* val) { - return val[DownpourCtrDoublePushValue::click_index()]; + static float& Click(float* val) { + return val[DownpourCtrDoublePushValue::ClickIndex()]; } - static float& embed_g(float* val) { - return val[DownpourCtrDoublePushValue::embed_g_index()]; + static float& EmbedG(float* val) { + return val[DownpourCtrDoublePushValue::Embed_G_Index()]; } - static float* embedx_g(float* val) { - return val + DownpourCtrDoublePushValue::embedx_g_index(); + static float* EmbedxG(float* val) { + return val + DownpourCtrDoublePushValue::Embedx_G_Index(); } }; struct DownpourCtrDoublePullValue { @@ -145,88 +145,88 @@ class DownpourCtrDoubleAccessor : public ValueAccessor { float embed_w; std::vector embedx_w; */ - static int dim(int embedx_dim) { return 3 + embedx_dim; } - static int dim_size(size_t dim) { return sizeof(float); } - static int size(int embedx_dim) { return dim(embedx_dim) * sizeof(float); } - static int show_index() { return 0; } - static int click_index() { return 1; } - static int embed_w_index() { return 2; } - static int embedx_w_index() { return 3; } - static float& show(float* val) { - return val[DownpourCtrDoublePullValue::show_index()]; + static int Dim(int embedx_dim) { return 3 + embedx_dim; } + static int DimSize(size_t dim) { return sizeof(float); } + static int Size(int embedx_dim) { return Dim(embedx_dim) * sizeof(float); } + static int ShowIndex() { return 0; } + static int ClickIndex() { return 1; } + static int Embed_W_Index() { return 2; } + static int Embedx_W_Index() { return 3; } + static float& Show(float* val) { + return val[DownpourCtrDoublePullValue::ShowIndex()]; } - static float& click(float* val) { - return val[DownpourCtrDoublePullValue::click_index()]; + static float& Click(float* val) { + return val[DownpourCtrDoublePullValue::ClickIndex()]; } - static float& embed_w(float* val) { - return val[DownpourCtrDoublePullValue::embed_w_index()]; + static float& EmbedW(float* val) { + return val[DownpourCtrDoublePullValue::Embed_W_Index()]; } - static float* embedx_w(float* val) { - return val + DownpourCtrDoublePullValue::embedx_w_index(); + static float* EmbedxW(float* val) { + return val + DownpourCtrDoublePullValue::Embedx_W_Index(); } }; DownpourCtrDoubleAccessor() {} virtual ~DownpourCtrDoubleAccessor() {} - virtual int initialize(); + virtual int Initialize(); virtual void SetTableInfo(AccessorInfo& info); virtual size_t GetTableInfo(InfoKey key); // value维度 - virtual size_t dim(); + size_t Dim(); // value各个维度的size - virtual size_t dim_size(size_t dim); + size_t DimSize(size_t dim); // value各维度相加总size - virtual size_t size(); + size_t Size(); // value中mf动态长度部分总size大小, sparse下生效 - virtual size_t mf_size(); + size_t MFSize(); // pull value维度 - virtual size_t select_dim(); 
+ size_t SelectDim(); // pull value各个维度的size - virtual size_t select_dim_size(size_t dim); + size_t SelectDimSize(size_t dim); // pull value各维度相加总size - virtual size_t select_size(); + size_t SelectSize(); // push value维度 - virtual size_t update_dim(); + size_t UpdateDim(); // push value各个维度的size - virtual size_t update_dim_size(size_t dim); + size_t UpdateDimSize(size_t dim); // push value各维度相加总size - virtual size_t update_size(); + size_t UpdateSize(); // 判断该value是否进行shrink - virtual bool shrink(float* value); - virtual bool need_extend_mf(float* value); + virtual bool Shrink(float* value); + virtual bool NeedExtendMF(float* value); // 判断该value是否在save阶段dump, // param作为参数用于标识save阶段,如downpour的xbox与batch_model // param = 0, save all feature // param = 1, save delta feature // param = 3, save all feature with time decay - virtual bool save(float* value, int param) override; + virtual bool Save(float* value, int param) override; // update delta_score and unseen_days after save - virtual void update_stat_after_save(float* value, int param) override; + virtual void UpdateStatAfterSave(float* value, int param) override; // 判断该value是否保存到ssd virtual bool save_ssd(float* value); // virtual bool save_cache(float* value, int param, double // global_cache_threshold) override; // keys不存在时,为values生成随机值 // 要求value的内存由外部调用者分配完毕 - virtual int32_t create(float** value, size_t num); + virtual int32_t Create(float** value, size_t num); // 从values中选取到select_values中 - virtual int32_t select(float** select_values, const float** values, + virtual int32_t Select(float** select_values, const float** values, size_t num); // 将update_values聚合到一起 - virtual int32_t merge(float** update_values, + virtual int32_t Merge(float** update_values, const float** other_update_values, size_t num); // 将update_values聚合到一起,通过it.next判定是否进入下一个key - // virtual int32_t merge(float** update_values, iterator it); + // virtual int32_t Merge(float** update_values, iterator it); // 将update_values更新应用到values中 - virtual int32_t update(float** values, const float** update_values, + virtual int32_t Update(float** values, const float** update_values, size_t num); - virtual std::string parse_to_string(const float* value, int param) override; - virtual int32_t parse_from_string(const std::string& str, float* v) override; - virtual bool create_value(int type, const float* value); + virtual std::string ParseToString(const float* value, int param) override; + virtual int32_t ParseFromString(const std::string& str, float* v) override; + virtual bool CreateValue(int type, const float* value); //这个接口目前只用来取show - virtual float get_field(float* value, const std::string& name) override { + virtual float GetField(float* value, const std::string& name) override { CHECK(name == "show"); if (name == "show") { - return (float)DownpourCtrDoubleFeatureValue::show(value); + return (float)DownpourCtrDoubleFeatureValue::Show(value); } return 0.0; } diff --git a/paddle/fluid/distributed/ps/table/downpour_ctr_accessor.cc b/paddle/fluid/distributed/ps/table/downpour_ctr_accessor.cc index 2fff81b1a4dc61..1140afd1c1e096 100644 --- a/paddle/fluid/distributed/ps/table/downpour_ctr_accessor.cc +++ b/paddle/fluid/distributed/ps/table/downpour_ctr_accessor.cc @@ -20,7 +20,7 @@ namespace paddle { namespace distributed { -int DownpourCtrAccessor::initialize() { +int DownpourCtrAccessor::Initialize() { auto name = _config.embed_sgd_param().name(); _embed_sgd_rule = CREATE_PSCORE_CLASS(SparseValueSGDRule, name); _embed_sgd_rule->load_config(_config.embed_sgd_param(), 1); @@ -38,86 
+38,77 @@ int DownpourCtrAccessor::initialize() { } void DownpourCtrAccessor::SetTableInfo(AccessorInfo& info) { - info.dim = dim(); - info.size = size(); - info.select_dim = select_dim(); - info.select_size = select_size(); - info.update_dim = update_dim(); - info.update_size = update_size(); - info.mf_size = mf_size(); - info.fea_dim = fea_dim(); + info.dim = Dim(); + info.size = Size(); + info.select_dim = SelectDim(); + info.select_size = SelectSize(); + info.update_dim = UpdateDim(); + info.update_size = UpdateSize(); + info.mf_size = MFSize(); } size_t DownpourCtrAccessor::GetTableInfo(InfoKey key) { switch (key) { case DIM: - return dim(); + return Dim(); case SIZE: - return size(); + return Size(); case SELECT_DIM: - return select_dim(); + return SelectDim(); case SELECT_SIZE: - return select_size(); + return SelectSize(); case UPDATE_DIM: - return update_dim(); + return UpdateDim(); case UPDATE_SIZE: - return update_size(); + return UpdateSize(); case MF_SIZE: - return mf_size(); - case FEA_DIM: - return fea_dim(); + return MFSize(); + default: + return 0; } return 0; } -size_t DownpourCtrAccessor::dim() { +size_t DownpourCtrAccessor::Dim() { auto embedx_dim = _config.embedx_dim(); - return DownpourCtrFeatureValue::dim(embedx_dim); + return DownpourCtrFeatureValue::Dim(embedx_dim); } -size_t DownpourCtrAccessor::dim_size(size_t dim) { +size_t DownpourCtrAccessor::DimSize(size_t dim) { auto embedx_dim = _config.embedx_dim(); - return DownpourCtrFeatureValue::dim_size(dim, embedx_dim); + return DownpourCtrFeatureValue::DimSize(dim, embedx_dim); } -size_t DownpourCtrAccessor::size() { +size_t DownpourCtrAccessor::Size() { auto embedx_dim = _config.embedx_dim(); - return DownpourCtrFeatureValue::size(embedx_dim); + return DownpourCtrFeatureValue::Size(embedx_dim); } -size_t DownpourCtrAccessor::mf_size() { +size_t DownpourCtrAccessor::MFSize() { return (_config.embedx_dim() + 1) * sizeof(float); // embedx embedx_g2sum } // pull value -size_t DownpourCtrAccessor::select_dim() { +size_t DownpourCtrAccessor::SelectDim() { auto embedx_dim = _config.embedx_dim(); return 3 + embedx_dim; } -size_t DownpourCtrAccessor::select_dim_size(size_t dim) { - return sizeof(float); -} +size_t DownpourCtrAccessor::SelectDimSize(size_t dim) { return sizeof(float); } -size_t DownpourCtrAccessor::select_size() { - return select_dim() * sizeof(float); -} +size_t DownpourCtrAccessor::SelectSize() { return SelectDim() * sizeof(float); } // push value -size_t DownpourCtrAccessor::update_dim() { +size_t DownpourCtrAccessor::UpdateDim() { auto embedx_dim = _config.embedx_dim(); return 4 + embedx_dim; } -size_t DownpourCtrAccessor::update_dim_size(size_t dim) { - return sizeof(float); -} +size_t DownpourCtrAccessor::UpdateDimSize(size_t dim) { return sizeof(float); } -size_t DownpourCtrAccessor::update_size() { - return update_dim() * sizeof(float); -} +size_t DownpourCtrAccessor::UpdateSize() { return UpdateDim() * sizeof(float); } -bool DownpourCtrAccessor::shrink(float* value) { +bool DownpourCtrAccessor::Shrink(float* value) { // auto base_threshold = _config.ctr_accessor_param().base_threshold(); // auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); // auto delete_threshold = _config.ctr_accessor_param().delete_threshold(); @@ -134,9 +125,9 @@ bool DownpourCtrAccessor::shrink(float* value) { return true; } auto show_right = - DownpourCtrFeatureValue::show(value) * _time_decay_rates[day_diff]; + DownpourCtrFeatureValue::Show(value) * _time_decay_rates[day_diff]; auto click_right = - 
DownpourCtrFeatureValue::click(value) * _time_decay_rates[day_diff]; + DownpourCtrFeatureValue::Click(value) * _time_decay_rates[day_diff]; // shrink after auto score = show_click_score(show_right, click_right); @@ -175,15 +166,15 @@ bool DownpourCtrAccessor::save_ssd(float* value) { // auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); // auto unseen_days = DownpourCtrFeatureValue::unseen_days(value); // int16_t day_diff = _day_id - unseen_days; -// if (show_click_score(DownpourCtrFeatureValue::show(value), -// DownpourCtrFeatureValue::click(value)) >= base_threshold +// if (show_click_score(DownpourCtrFeatureValue::Show(value), +// DownpourCtrFeatureValue::Click(value)) >= base_threshold // && day_diff <= delta_keep_days) { -// return DownpourCtrFeatureValue::show(value) > global_cache_threshold; +// return DownpourCtrFeatureValue::Show(value) > global_cache_threshold; // } // return false; // } -bool DownpourCtrAccessor::save(float* value, int param) { +bool DownpourCtrAccessor::Save(float* value, int param) { // auto base_threshold = _config.ctr_accessor_param().base_threshold(); // auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); // auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); @@ -206,9 +197,9 @@ bool DownpourCtrAccessor::save(float* value, int param) { int16_t day_diff = _day_id - unseen_days; auto show_right = - DownpourCtrFeatureValue::show(value) * _time_decay_rates[day_diff]; + DownpourCtrFeatureValue::Show(value) * _time_decay_rates[day_diff]; auto click_right = - DownpourCtrFeatureValue::click(value) * _time_decay_rates[day_diff]; + DownpourCtrFeatureValue::Click(value) * _time_decay_rates[day_diff]; if (show_click_score(show_right, click_right) >= base_threshold && DownpourCtrFeatureValue::delta_score(value) >= delta_threshold && @@ -224,8 +215,8 @@ bool DownpourCtrAccessor::save(float* value, int param) { } // already decayed in shrink case 3: { - // DownpourCtrFeatureValue::show(value) *= _show_click_decay_rate; - // DownpourCtrFeatureValue::click(value) *= _show_click_decay_rate; + // DownpourCtrFeatureValue::Show(value) *= _show_click_decay_rate; + // DownpourCtrFeatureValue::Click(value) *= _show_click_decay_rate; // do this after save, because it must not be modified when retry // DownpourCtrFeatureValue::unseen_days(value)++; return true; @@ -235,7 +226,7 @@ bool DownpourCtrAccessor::save(float* value, int param) { }; } -void DownpourCtrAccessor::update_stat_after_save(float* value, int param) { +void DownpourCtrAccessor::UpdateStatAfterSave(float* value, int param) { auto base_threshold = _config.ctr_accessor_param().base_threshold(); auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); @@ -247,9 +238,9 @@ void DownpourCtrAccessor::update_stat_after_save(float* value, int param) { auto unseen_days = DownpourCtrFeatureValue::unseen_days(value); int16_t day_diff = _day_id - unseen_days; auto show_right = - DownpourCtrFeatureValue::show(value) * _time_decay_rates[day_diff]; + DownpourCtrFeatureValue::Show(value) * _time_decay_rates[day_diff]; auto click_right = - DownpourCtrFeatureValue::click(value) * _time_decay_rates[day_diff]; + DownpourCtrFeatureValue::Click(value) * _time_decay_rates[day_diff]; if (show_click_score(show_right, click_right) >= base_threshold && DownpourCtrFeatureValue::delta_score(value) >= delta_threshold && @@ -268,28 +259,28 @@ void DownpourCtrAccessor::update_stat_after_save(float* value, int 
param) { }; } -int32_t DownpourCtrAccessor::create(float** values, size_t num) { +int32_t DownpourCtrAccessor::Create(float** values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* value = values[value_item]; value[DownpourCtrFeatureValue::unseen_days_index()] = 0; value[DownpourCtrFeatureValue::delta_score_index()] = 0; - value[DownpourCtrFeatureValue::show_index()] = 0; - value[DownpourCtrFeatureValue::click_index()] = 0; - value[DownpourCtrFeatureValue::slot_index()] = -1; + value[DownpourCtrFeatureValue::ShowIndex()] = 0; + value[DownpourCtrFeatureValue::ClickIndex()] = 0; + value[DownpourCtrFeatureValue::SlotIndex()] = -1; _embed_sgd_rule->init_value( - value + DownpourCtrFeatureValue::embed_w_index(), + value + DownpourCtrFeatureValue::Embed_W_Index(), value + DownpourCtrFeatureValue::embed_g2sum_index(), true); _embedx_sgd_rule->init_value( - value + DownpourCtrFeatureValue::embedx_w_index(), + value + DownpourCtrFeatureValue::Embedx_W_Index(), value + DownpourCtrFeatureValue::embedx_g2sum_index()); } return 0; } -bool DownpourCtrAccessor::need_extend_mf(float* value) { - float show = value[DownpourCtrFeatureValue::show_index()]; - float click = value[DownpourCtrFeatureValue::click_index()]; +bool DownpourCtrAccessor::NeedExtendMF(float* value) { + float show = value[DownpourCtrFeatureValue::ShowIndex()]; + float click = value[DownpourCtrFeatureValue::ClickIndex()]; // float score = (show - click) * _config.ctr_accessor_param().nonclk_coeff() float score = (show - click) * _config.ctr_accessor_param().nonclk_coeff() + click * _config.ctr_accessor_param().click_coeff(); @@ -297,25 +288,25 @@ bool DownpourCtrAccessor::need_extend_mf(float* value) { return score >= _config.embedx_threshold(); } -bool DownpourCtrAccessor::has_mf(size_t size) { +bool DownpourCtrAccessor::HasMF(size_t size) { return size > DownpourCtrFeatureValue::embedx_g2sum_index(); } // from DownpourCtrFeatureValue to DownpourCtrPullValue -int32_t DownpourCtrAccessor::select(float** select_values, const float** values, +int32_t DownpourCtrAccessor::Select(float** select_values, const float** values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* select_value = select_values[value_item]; float* value = const_cast(values[value_item]); - select_value[DownpourCtrPullValue::show_index()] = - value[DownpourCtrFeatureValue::show_index()]; - select_value[DownpourCtrPullValue::click_index()] = - value[DownpourCtrFeatureValue::click_index()]; - select_value[DownpourCtrPullValue::embed_w_index()] = - value[DownpourCtrFeatureValue::embed_w_index()]; - memcpy(select_value + DownpourCtrPullValue::embedx_w_index(), - value + DownpourCtrFeatureValue::embedx_w_index(), + select_value[DownpourCtrPullValue::ShowIndex()] = + value[DownpourCtrFeatureValue::ShowIndex()]; + select_value[DownpourCtrPullValue::ClickIndex()] = + value[DownpourCtrFeatureValue::ClickIndex()]; + select_value[DownpourCtrPullValue::Embed_W_Index()] = + value[DownpourCtrFeatureValue::Embed_W_Index()]; + memcpy(select_value + DownpourCtrPullValue::Embedx_W_Index(), + value + DownpourCtrFeatureValue::Embedx_W_Index(), embedx_dim * sizeof(float)); } return 0; @@ -324,16 +315,16 @@ int32_t DownpourCtrAccessor::select(float** select_values, const float** values, // from DownpourCtrPushValue to DownpourCtrPushValue // first dim: item // second dim: field num -int32_t DownpourCtrAccessor::merge(float** update_values, 
+int32_t DownpourCtrAccessor::Merge(float** update_values, const float** other_update_values, size_t num) { auto embedx_dim = _config.embedx_dim(); - size_t total_dim = DownpourCtrPushValue::dim(embedx_dim); + size_t total_dim = DownpourCtrPushValue::Dim(embedx_dim); for (size_t value_item = 0; value_item < num; ++value_item) { float* update_value = update_values[value_item]; const float* other_update_value = other_update_values[value_item]; for (auto i = 0u; i < total_dim; ++i) { - if (i != DownpourCtrPushValue::slot_index()) { + if (i != DownpourCtrPushValue::SlotIndex()) { update_value[i] += other_update_value[i]; } } @@ -344,18 +335,18 @@ int32_t DownpourCtrAccessor::merge(float** update_values, // from DownpourCtrPushValue to DownpourCtrFeatureValue // first dim: item // second dim: field num -int32_t DownpourCtrAccessor::update(float** update_values, +int32_t DownpourCtrAccessor::Update(float** update_values, const float** push_values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* update_value = update_values[value_item]; const float* push_value = push_values[value_item]; - float push_show = push_value[DownpourCtrPushValue::show_index()]; - float push_click = push_value[DownpourCtrPushValue::click_index()]; - float slot = push_value[DownpourCtrPushValue::slot_index()]; - update_value[DownpourCtrFeatureValue::show_index()] += push_show; - update_value[DownpourCtrFeatureValue::click_index()] += push_click; - update_value[DownpourCtrFeatureValue::slot_index()] = slot; + float push_show = push_value[DownpourCtrPushValue::ShowIndex()]; + float push_click = push_value[DownpourCtrPushValue::ClickIndex()]; + float slot = push_value[DownpourCtrPushValue::SlotIndex()]; + update_value[DownpourCtrFeatureValue::ShowIndex()] += push_show; + update_value[DownpourCtrFeatureValue::ClickIndex()] += push_click; + update_value[DownpourCtrFeatureValue::SlotIndex()] = slot; update_value[DownpourCtrFeatureValue::delta_score_index()] += (push_show - push_click) * _config.ctr_accessor_param().nonclk_coeff() + push_click * _config.ctr_accessor_param().click_coeff(); @@ -363,25 +354,25 @@ int32_t DownpourCtrAccessor::update(float** update_values, // push_click * _config.ctr_accessor_param().click_coeff(); update_value[DownpourCtrFeatureValue::unseen_days_index()] = 0; _embed_sgd_rule->update_value( - update_value + DownpourCtrFeatureValue::embed_w_index(), + update_value + DownpourCtrFeatureValue::Embed_W_Index(), update_value + DownpourCtrFeatureValue::embed_g2sum_index(), - push_value + DownpourCtrPushValue::embed_g_index(), push_show); + push_value + DownpourCtrPushValue::Embed_G_Index(), push_show); _embedx_sgd_rule->update_value( - update_value + DownpourCtrFeatureValue::embedx_w_index(), + update_value + DownpourCtrFeatureValue::Embedx_W_Index(), update_value + DownpourCtrFeatureValue::embedx_g2sum_index(), - push_value + DownpourCtrPushValue::embedx_g_index(), push_show); + push_value + DownpourCtrPushValue::Embedx_G_Index(), push_show); } return 0; } -bool DownpourCtrAccessor::create_value(int stage, const float* value) { +bool DownpourCtrAccessor::CreateValue(int stage, const float* value) { // stage == 0, pull // stage == 1, push if (stage == 0) { return true; } else if (stage == 1) { - auto show = DownpourCtrPushValue::show(const_cast(value)); - auto click = DownpourCtrPushValue::click(const_cast(value)); + auto show = DownpourCtrPushValue::Show(const_cast(value)); + auto click = 
DownpourCtrPushValue::Click(const_cast(value)); auto score = show_click_score(show, click); if (score <= 0) { return false; @@ -404,15 +395,14 @@ float DownpourCtrAccessor::show_click_score(float show, float click) { return (show - click) * nonclk_coeff + click * click_coeff; } -std::string DownpourCtrAccessor::parse_to_string(const float* v, - int param_size) { +std::string DownpourCtrAccessor::ParseToString(const float* v, int param_size) { thread_local std::ostringstream os; os.clear(); os.str(""); os << v[0] << " " << v[1] << " " << v[2] << " " << v[3] << " " << v[4] << " " << v[5] << " " << v[6]; - auto show = DownpourCtrFeatureValue::show(const_cast(v)); - auto click = DownpourCtrFeatureValue::click(const_cast(v)); + auto show = DownpourCtrFeatureValue::Show(const_cast(v)); + auto click = DownpourCtrFeatureValue::Click(const_cast(v)); auto score = show_click_score(show, click); if (score >= _config.embedx_threshold() && param_size > 7) { os << " " << v[7]; @@ -423,22 +413,21 @@ std::string DownpourCtrAccessor::parse_to_string(const float* v, return os.str(); } -int DownpourCtrAccessor::parse_from_string(const std::string& str, - float* value) { +int DownpourCtrAccessor::ParseFromString(const std::string& str, float* value) { int embedx_dim = _config.embedx_dim(); - float data_buff[dim()]; + float data_buff[Dim()]; float* data_buff_ptr = data_buff; _embedx_sgd_rule->init_value( - data_buff_ptr + DownpourCtrFeatureValue::embedx_w_index(), + data_buff_ptr + DownpourCtrFeatureValue::Embedx_W_Index(), data_buff_ptr + DownpourCtrFeatureValue::embedx_g2sum_index()); auto str_len = paddle::string::str_to_float(str.data(), data_buff_ptr); CHECK(str_len >= 6) << "expect more than 6 real:" << str_len; // no slot, embedx - int value_dim = dim(); + int value_dim = Dim(); int embedx_g2sum_index = DownpourCtrFeatureValue::embedx_g2sum_index(); - value[DownpourCtrFeatureValue::slot_index()] = -1; + value[DownpourCtrFeatureValue::SlotIndex()] = -1; // other case if (str_len == (value_dim - 1)) { memcpy(value, data_buff_ptr, (embedx_g2sum_index - 1) * sizeof(float)); @@ -494,8 +483,8 @@ void DownpourCtrAccessor::update_time_decay(float* value, if (day_diff >= _config.ctr_accessor_param().delete_after_unseen_days()) { return; } - DownpourCtrFeatureValue::show(value) *= _time_decay_rates[day_diff]; - DownpourCtrFeatureValue::click(value) *= _time_decay_rates[day_diff]; + DownpourCtrFeatureValue::Show(value) *= _time_decay_rates[day_diff]; + DownpourCtrFeatureValue::Click(value) *= _time_decay_rates[day_diff]; if (is_update_seen_day) { DownpourCtrFeatureValue::unseen_days(value) = _day_id; } diff --git a/paddle/fluid/distributed/ps/table/downpour_ctr_accessor.h b/paddle/fluid/distributed/ps/table/downpour_ctr_accessor.h index 6ff6c0438310e3..de1f080f42e1f4 100644 --- a/paddle/fluid/distributed/ps/table/downpour_ctr_accessor.h +++ b/paddle/fluid/distributed/ps/table/downpour_ctr_accessor.h @@ -42,32 +42,30 @@ class DownpourCtrAccessor : public ValueAccessor { std::vector embedx_w; */ - static int dim(int embedx_dim) { return 8 + embedx_dim; } - static int dim_size(size_t dim, int embedx_dim) { return sizeof(float); } - static int size(int embedx_dim) { return dim(embedx_dim) * sizeof(float); } + static int Dim(int embedx_dim) { return 8 + embedx_dim; } + static int DimSize(size_t dim, int embedx_dim) { return sizeof(float); } + static int Size(int embedx_dim) { return Dim(embedx_dim) * sizeof(float); } static int unseen_days_index() { return 0; } static int delta_score_index() { return 
DownpourCtrFeatureValue::unseen_days_index() + 1; } - static int show_index() { + static int ShowIndex() { return DownpourCtrFeatureValue::delta_score_index() + 1; } - static int click_index() { - return DownpourCtrFeatureValue::show_index() + 1; - } - static int embed_w_index() { - return DownpourCtrFeatureValue::click_index() + 1; + static int ClickIndex() { return DownpourCtrFeatureValue::ShowIndex() + 1; } + static int Embed_W_Index() { + return DownpourCtrFeatureValue::ClickIndex() + 1; } static int embed_g2sum_index() { - return DownpourCtrFeatureValue::embed_w_index() + 1; + return DownpourCtrFeatureValue::Embed_W_Index() + 1; } - static int slot_index() { + static int SlotIndex() { return DownpourCtrFeatureValue::embed_g2sum_index() + 1; } static int embedx_g2sum_index() { - return DownpourCtrFeatureValue::slot_index() + 1; + return DownpourCtrFeatureValue::SlotIndex() + 1; } - static int embedx_w_index() { + static int Embedx_W_Index() { return DownpourCtrFeatureValue::embedx_g2sum_index() + 1; } static float& unseen_days(float* val) { @@ -76,17 +74,17 @@ class DownpourCtrAccessor : public ValueAccessor { static float& delta_score(float* val) { return val[DownpourCtrFeatureValue::delta_score_index()]; } - static float& show(float* val) { - return val[DownpourCtrFeatureValue::show_index()]; + static float& Show(float* val) { + return val[DownpourCtrFeatureValue::ShowIndex()]; } - static float& click(float* val) { - return val[DownpourCtrFeatureValue::click_index()]; + static float& Click(float* val) { + return val[DownpourCtrFeatureValue::ClickIndex()]; } - static float& slot(float* val) { - return val[DownpourCtrFeatureValue::slot_index()]; + static float& Slot(float* val) { + return val[DownpourCtrFeatureValue::SlotIndex()]; } - static float& embed_w(float* val) { - return val[DownpourCtrFeatureValue::embed_w_index()]; + static float& EmbedW(float* val) { + return val[DownpourCtrFeatureValue::Embed_W_Index()]; } static float& embed_g2sum(float* val) { return val[DownpourCtrFeatureValue::embed_g2sum_index()]; @@ -94,8 +92,8 @@ class DownpourCtrAccessor : public ValueAccessor { static float& embedx_g2sum(float* val) { return val[DownpourCtrFeatureValue::embedx_g2sum_index()]; } - static float* embedx_w(float* val) { - return (val + DownpourCtrFeatureValue::embedx_w_index()); + static float* EmbedxW(float* val) { + return (val + DownpourCtrFeatureValue::Embedx_W_Index()); } }; @@ -108,24 +106,24 @@ class DownpourCtrAccessor : public ValueAccessor { std::vector embedx_g; */ - static int dim(int embedx_dim) { return 4 + embedx_dim; } + static int Dim(int embedx_dim) { return 4 + embedx_dim; } - static int dim_size(int dim, int embedx_dim) { return sizeof(float); } - static int size(int embedx_dim) { return dim(embedx_dim) * sizeof(float); } - static int slot_index() { return 0; } - static int show_index() { return DownpourCtrPushValue::slot_index() + 1; } - static int click_index() { return DownpourCtrPushValue::show_index() + 1; } - static int embed_g_index() { - return DownpourCtrPushValue::click_index() + 1; - } - static int embedx_g_index() { - return DownpourCtrPushValue::embed_g_index() + 1; - } - static float& slot(float* val) { return val[0]; } - static float& show(float* val) { return val[1]; } - static float& click(float* val) { return val[2]; } - static float& embed_g(float* val) { return val[3]; } - static float* embedx_g(float* val) { return val + 4; } + static int DimSize(int dim, int embedx_dim) { return sizeof(float); } + static int Size(int embedx_dim) { return 
Dim(embedx_dim) * sizeof(float); } + static int SlotIndex() { return 0; } + static int ShowIndex() { return DownpourCtrPushValue::SlotIndex() + 1; } + static int ClickIndex() { return DownpourCtrPushValue::ShowIndex() + 1; } + static int Embed_G_Index() { + return DownpourCtrPushValue::ClickIndex() + 1; + } + static int Embedx_G_Index() { + return DownpourCtrPushValue::Embed_G_Index() + 1; + } + static float& Slot(float* val) { return val[0]; } + static float& Show(float* val) { return val[1]; } + static float& Click(float* val) { return val[2]; } + static float& EmbedG(float* val) { return val[3]; } + static float* EmbedxG(float* val) { return val + 4; } }; struct DownpourCtrPullValue { @@ -136,95 +134,95 @@ class DownpourCtrAccessor : public ValueAccessor { std::vector embedx_w; */ - static int dim(int embedx_dim) { return 3 + embedx_dim; } - static int dim_size(size_t dim) { return sizeof(float); } - static int size(int embedx_dim) { return dim(embedx_dim) * sizeof(float); } - static int show_index() { return 0; } - static int click_index() { return 1; } - static int embed_w_index() { return 2; } - static int embedx_w_index() { return 3; } - static float& show(float* val) { - return val[DownpourCtrPullValue::show_index()]; + static int Dim(int embedx_dim) { return 3 + embedx_dim; } + static int DimSize(size_t dim) { return sizeof(float); } + static int Size(int embedx_dim) { return Dim(embedx_dim) * sizeof(float); } + static int ShowIndex() { return 0; } + static int ClickIndex() { return 1; } + static int Embed_W_Index() { return 2; } + static int Embedx_W_Index() { return 3; } + static float& Show(float* val) { + return val[DownpourCtrPullValue::ShowIndex()]; } - static float& click(float* val) { - return val[DownpourCtrPullValue::click_index()]; + static float& Click(float* val) { + return val[DownpourCtrPullValue::ClickIndex()]; } - static float& embed_w(float* val) { - return val[DownpourCtrPullValue::embed_w_index()]; + static float& EmbedW(float* val) { + return val[DownpourCtrPullValue::Embed_W_Index()]; } - static float* embedx_w(float* val) { - return val + DownpourCtrPullValue::embedx_w_index(); + static float* EmbedxW(float* val) { + return val + DownpourCtrPullValue::Embedx_W_Index(); } }; DownpourCtrAccessor() {} virtual ~DownpourCtrAccessor() {} - virtual int initialize(); + virtual int Initialize(); virtual void SetTableInfo(AccessorInfo& info); virtual size_t GetTableInfo(InfoKey key); // value维度 - virtual size_t dim(); + size_t Dim(); // value各个维度的size - virtual size_t dim_size(size_t dim); + size_t DimSize(size_t dim); // value各维度相加总size - virtual size_t size(); + size_t Size(); // value中mf动态长度部分总size大小, sparse下生效 - virtual size_t mf_size(); + size_t MFSize(); // pull value维度 - virtual size_t select_dim(); + size_t SelectDim(); // pull value各个维度的size - virtual size_t select_dim_size(size_t dim); + size_t SelectDimSize(size_t dim); // pull value各维度相加总size - virtual size_t select_size(); + size_t SelectSize(); // push value维度 - virtual size_t update_dim(); + size_t UpdateDim(); // push value各个维度的size - virtual size_t update_dim_size(size_t dim); + size_t UpdateDimSize(size_t dim); // push value各维度相加总size - virtual size_t update_size(); + size_t UpdateSize(); // 判断该value是否进行shrink - virtual bool shrink(float* value); + virtual bool Shrink(float* value); // 判断该value是否保存到ssd virtual bool save_ssd(float* value); - virtual bool need_extend_mf(float* value); - virtual bool has_mf(size_t size); + virtual bool NeedExtendMF(float* value); + virtual bool HasMF(size_t size); // 
判断该value是否在save阶段dump, // param作为参数用于标识save阶段,如downpour的xbox与batch_model // param = 0, save all feature // param = 1, save delta feature // param = 3, save all feature with time decay - virtual bool save(float* value, int param) override; + virtual bool Save(float* value, int param) override; // update delta_score and unseen_days after save - virtual void update_stat_after_save(float* value, int param) override; + virtual void UpdateStatAfterSave(float* value, int param) override; // virtual bool save_cache(float* value, int param, double // global_cache_threshold) override; // keys不存在时,为values生成随机值 // 要求value的内存由外部调用者分配完毕 - virtual int32_t create(float** value, size_t num); + virtual int32_t Create(float** value, size_t num); // 从values中选取到select_values中 - virtual int32_t select(float** select_values, const float** values, + virtual int32_t Select(float** select_values, const float** values, size_t num); // 将update_values聚合到一起 - virtual int32_t merge(float** update_values, + virtual int32_t Merge(float** update_values, const float** other_update_values, size_t num); // 将update_values聚合到一起,通过it.next判定是否进入下一个key - // virtual int32_t merge(float** update_values, iterator it); + // virtual int32_t Merge(float** update_values, iterator it); // 将update_values更新应用到values中 - virtual int32_t update(float** values, const float** update_values, + virtual int32_t Update(float** values, const float** update_values, size_t num); - virtual std::string parse_to_string(const float* value, int param) override; - virtual int32_t parse_from_string(const std::string& str, float* v) override; - virtual bool create_value(int type, const float* value); + virtual std::string ParseToString(const float* value, int param) override; + virtual int32_t ParseFromString(const std::string& str, float* v) override; + virtual bool CreateValue(int type, const float* value); //这个接口目前只用来取show - virtual float get_field(float* value, const std::string& name) override { + virtual float GetField(float* value, const std::string& name) override { CHECK(name == "show"); if (name == "show") { auto unseen_days = DownpourCtrFeatureValue::unseen_days(value); int16_t day_diff = _day_id - unseen_days; auto show_right = - DownpourCtrFeatureValue::show(value) * _time_decay_rates[day_diff]; + DownpourCtrFeatureValue::Show(value) * _time_decay_rates[day_diff]; return (float)show_right; } return 0.0; diff --git a/paddle/fluid/distributed/ps/table/memory_sparse_table.cc b/paddle/fluid/distributed/ps/table/memory_sparse_table.cc index 3f5c484eab8252..61ea2f8f2007e7 100644 --- a/paddle/fluid/distributed/ps/table/memory_sparse_table.cc +++ b/paddle/fluid/distributed/ps/table/memory_sparse_table.cc @@ -99,9 +99,9 @@ int32_t MemorySparseTable::load(const std::string& path, channel_config.path = file_list[file_start_idx + i]; VLOG(1) << "MemorySparseTable::load begin load " << channel_config.path << " into local shard " << i; - channel_config.converter = _value_accesor->converter(load_param).converter; + channel_config.converter = _value_accesor->Converter(load_param).converter; channel_config.deconverter = - _value_accesor->converter(load_param).deconverter; + _value_accesor->Converter(load_param).deconverter; bool is_read_failed = false; int retry_num = 0; @@ -119,8 +119,7 @@ int32_t MemorySparseTable::load(const std::string& path, uint64_t key = std::strtoul(line_data.data(), &end, 10); auto& value = shard[key]; value.resize(feature_value_size); - int parse_size = - _value_accesor->parse_from_string(++end, value.data()); + int parse_size = 
_value_accesor->ParseFromString(++end, value.data()); value.resize(parse_size); // for debug @@ -196,8 +195,7 @@ int32_t MemorySparseTable::load_local_fs(const std::string& path, uint64_t key = std::strtoul(line_data.data(), &end, 10); auto& value = shard[key]; value.resize(feature_value_size); - int parse_size = - _value_accesor->parse_from_string(++end, value.data()); + int parse_size = _value_accesor->ParseFromString(++end, value.data()); value.resize(parse_size); } file.close(); @@ -253,9 +251,9 @@ int32_t MemorySparseTable::save(const std::string& dirname, paddle::string::format_string("%s/part-%03d-%05d", table_path.c_str(), _shard_idx, file_start_idx + i); } - channel_config.converter = _value_accesor->converter(save_param).converter; + channel_config.converter = _value_accesor->Converter(save_param).converter; channel_config.deconverter = - _value_accesor->converter(save_param).deconverter; + _value_accesor->Converter(save_param).deconverter; bool is_write_failed = false; int feasign_size = 0; int retry_num = 0; @@ -268,8 +266,8 @@ int32_t MemorySparseTable::save(const std::string& dirname, auto write_channel = _afs_client.open_w(channel_config, 1024 * 1024 * 40, &err_no); for (auto it = shard.begin(); it != shard.end(); ++it) { - if (_value_accesor->save(it.value().data(), save_param)) { - std::string format_value = _value_accesor->parse_to_string( + if (_value_accesor->Save(it.value().data(), save_param)) { + std::string format_value = _value_accesor->ParseToString( it.value().data(), it.value().size()); if (0 != write_channel->write_line(paddle::string::format_string( @@ -302,7 +300,7 @@ int32_t MemorySparseTable::save(const std::string& dirname, } while (is_write_failed); feasign_size_all += feasign_size; for (auto it = shard.begin(); it != shard.end(); ++it) { - _value_accesor->update_stat_after_save(it.value().data(), save_param); + _value_accesor->UpdateStatAfterSave(it.value().data(), save_param); } LOG(INFO) << "MemorySparseTable save prefix success, path: " << channel_config.path; @@ -334,9 +332,9 @@ int32_t MemorySparseTable::save_local_fs(const std::string& dirname, std::ofstream os; os.open(file_name); for (auto it = shard.begin(); it != shard.end(); ++it) { - if (_value_accesor->save(it.value().data(), save_param)) { - std::string format_value = _value_accesor->parse_to_string( - it.value().data(), it.value().size()); + if (_value_accesor->Save(it.value().data(), save_param)) { + std::string format_value = + _value_accesor->ParseToString(it.value().data(), it.value().size()); std::string out_line = paddle::string::format_string( "%lu %s\n", it.key(), format_value.c_str()); // VLOG(2) << out_line.c_str(); @@ -370,7 +368,7 @@ int64_t MemorySparseTable::local_mf_size() { auto& local_shard = _local_shards[shard_id]; for (auto it = local_shard.begin(); it != local_shard.end(); ++it) { - if (_value_accesor->has_mf(it.value().size())) { + if (_value_accesor->HasMF(it.value().size())) { size_arr[shard_id] += 1; } } @@ -453,7 +451,7 @@ int32_t MemorySparseTable::pull_sparse(float* pull_values, auto& feature_value = local_shard[key]; feature_value.resize(data_size); float* data_ptr = feature_value.data(); - _value_accesor->create(&data_buffer_ptr, 1); + _value_accesor->Create(&data_buffer_ptr, 1); memcpy(data_ptr, data_buffer_ptr, data_size * sizeof(float)); } @@ -467,7 +465,7 @@ int32_t MemorySparseTable::pull_sparse(float* pull_values, } auto offset = keys[i].second; float* select_data = pull_values + select_value_size * offset; - _value_accesor->select(&select_data, + 
_value_accesor->Select(&select_data, (const float**)&data_buffer_ptr, 1); } @@ -484,8 +482,8 @@ int32_t MemorySparseTable::pull_sparse(float* pull_values, int32_t MemorySparseTable::pull_sparse_ptr(char** pull_values, const uint64_t* keys, size_t num) { CostTimer timer("pscore_sparse_select_all"); - size_t value_size = _value_accesor->size() / sizeof(float); - size_t mf_value_size = _value_accesor->mf_size() / sizeof(float); + size_t value_size = _value_accesor->GetTableInfo(SIZE) / sizeof(float); + size_t mf_value_size = _value_accesor->GetTableInfo(MF_SIZE) / sizeof(float); std::vector> tasks(_real_local_shard_num); std::vector>> task_keys( @@ -514,7 +512,7 @@ int32_t MemorySparseTable::pull_sparse_ptr(char** pull_values, auto& feature_value = local_shard[key]; feature_value.resize(data_size); float* data_ptr = feature_value.data(); - _value_accesor->create(&data_buffer_ptr, 1); + _value_accesor->Create(&data_buffer_ptr, 1); memcpy(data_ptr, data_buffer_ptr, data_size * sizeof(float)); ret = &feature_value; } else { @@ -564,13 +562,13 @@ int32_t MemorySparseTable::push_sparse(const uint64_t* keys, auto itr = local_shard.find(key); if (itr == local_shard.end()) { if (FLAGS_pserver_enable_create_feasign_randomly && - !_value_accesor->create_value(1, update_data)) { + !_value_accesor->CreateValue(1, update_data)) { continue; } auto value_size = value_col - mf_value_col; auto& feature_value = local_shard[key]; feature_value.resize(value_size); - _value_accesor->create(&data_buffer_ptr, 1); + _value_accesor->Create(&data_buffer_ptr, 1); memcpy(feature_value.data(), data_buffer_ptr, value_size * sizeof(float)); itr = local_shard.find(key); @@ -581,16 +579,16 @@ int32_t MemorySparseTable::push_sparse(const uint64_t* keys, size_t value_size = feature_value.size(); if (value_size == value_col) { // 已拓展到最大size, 则就地update - _value_accesor->update(&value_data, &update_data, 1); + _value_accesor->Update(&value_data, &update_data, 1); } else { // 拷入buffer区进行update,然后再回填,不需要的mf则回填时抛弃了 memcpy(data_buffer_ptr, value_data, value_size * sizeof(float)); - _value_accesor->update(&data_buffer_ptr, &update_data, 1); + _value_accesor->Update(&data_buffer_ptr, &update_data, 1); - if (_value_accesor->need_extend_mf(data_buffer)) { + if (_value_accesor->NeedExtendMF(data_buffer)) { feature_value.resize(value_col); value_data = feature_value.data(); - _value_accesor->create(&value_data, 1); + _value_accesor->Create(&value_data, 1); } memcpy(value_data, data_buffer_ptr, value_size * sizeof(float)); } @@ -641,13 +639,13 @@ int32_t MemorySparseTable::_push_sparse(const uint64_t* keys, auto itr = local_shard.find(key); if (itr == local_shard.end()) { if (FLAGS_pserver_enable_create_feasign_randomly && - !_value_accesor->create_value(1, update_data)) { + !_value_accesor->CreateValue(1, update_data)) { continue; } auto value_size = value_col - mf_value_col; auto& feature_value = local_shard[key]; feature_value.resize(value_size); - _value_accesor->create(&data_buffer_ptr, 1); + _value_accesor->Create(&data_buffer_ptr, 1); memcpy(feature_value.data(), data_buffer_ptr, value_size * sizeof(float)); itr = local_shard.find(key); @@ -656,15 +654,15 @@ int32_t MemorySparseTable::_push_sparse(const uint64_t* keys, float* value_data = feature_value.data(); size_t value_size = feature_value.size(); if (value_size == value_col) { // 已拓展到最大size, 则就地update - _value_accesor->update(&value_data, &update_data, 1); + _value_accesor->Update(&value_data, &update_data, 1); } else { // 拷入buffer区进行update,然后再回填,不需要的mf则回填时抛弃了 
memcpy(data_buffer_ptr, value_data, value_size * sizeof(float)); - _value_accesor->update(&data_buffer_ptr, &update_data, 1); - if (_value_accesor->need_extend_mf(data_buffer)) { + _value_accesor->Update(&data_buffer_ptr, &update_data, 1); + if (_value_accesor->NeedExtendMF(data_buffer)) { feature_value.resize(value_col); value_data = feature_value.data(); - _value_accesor->create(&value_data, 1); + _value_accesor->Create(&value_data, 1); } memcpy(value_data, data_buffer_ptr, value_size * sizeof(float)); } @@ -688,7 +686,7 @@ int32_t MemorySparseTable::shrink(const std::string& param) { // shrink auto& shard = _local_shards[shard_id]; for (auto it = shard.begin(); it != shard.end();) { - if (_value_accesor->shrink(it.value().data())) { + if (_value_accesor->Shrink(it.value().data())) { it = shard.erase(it); } else { ++it; diff --git a/paddle/fluid/distributed/ps/table/sparse_accessor.cc b/paddle/fluid/distributed/ps/table/sparse_accessor.cc index 651ff9d00e49ac..511b36389aaee4 100644 --- a/paddle/fluid/distributed/ps/table/sparse_accessor.cc +++ b/paddle/fluid/distributed/ps/table/sparse_accessor.cc @@ -20,7 +20,7 @@ namespace paddle { namespace distributed { -int SparseAccessor::initialize() { +int SparseAccessor::Initialize() { auto name = _config.embed_sgd_param().name(); _embed_sgd_rule = CREATE_PSCORE_CLASS(SparseValueSGDRule, name); _embed_sgd_rule->load_config(_config.embed_sgd_param(), 1); @@ -39,73 +39,72 @@ int SparseAccessor::initialize() { } void SparseAccessor::SetTableInfo(AccessorInfo& info) { - info.dim = dim(); - info.size = size(); - info.select_dim = select_dim(); - info.select_size = select_size(); - info.update_dim = update_dim(); - info.update_size = update_size(); - info.mf_size = mf_size(); - info.fea_dim = fea_dim(); + info.dim = Dim(); + info.size = Size(); + info.select_dim = SelectDim(); + info.select_size = SelectSize(); + info.update_dim = UpdateDim(); + info.update_size = UpdateSize(); + info.mf_size = MFSize(); } size_t SparseAccessor::GetTableInfo(InfoKey key) { switch (key) { case DIM: - return dim(); + return Dim(); case SIZE: - return size(); + return Size(); case SELECT_DIM: - return select_dim(); + return SelectDim(); case SELECT_SIZE: - return select_size(); + return SelectSize(); case UPDATE_DIM: - return update_dim(); + return UpdateDim(); case UPDATE_SIZE: - return update_size(); + return UpdateSize(); case MF_SIZE: - return mf_size(); - case FEA_DIM: - return fea_dim(); + return MFSize(); + default: + return 0; } return 0; } -size_t SparseAccessor::dim() { return sparse_feature_value.dim(); } +size_t SparseAccessor::Dim() { return sparse_feature_value.Dim(); } -size_t SparseAccessor::dim_size(size_t dim) { +size_t SparseAccessor::DimSize(size_t dim) { auto embedx_dim = _config.embedx_dim(); - return sparse_feature_value.dim_size(dim, embedx_dim); + return sparse_feature_value.DimSize(dim, embedx_dim); } -size_t SparseAccessor::size() { return sparse_feature_value.size(); } +size_t SparseAccessor::Size() { return sparse_feature_value.Size(); } -size_t SparseAccessor::mf_size() { +size_t SparseAccessor::MFSize() { return (_config.embedx_dim() + sparse_feature_value.embedx_sgd_dim) * sizeof(float); // embedx embedx_g2sum } // pull value -size_t SparseAccessor::select_dim() { +size_t SparseAccessor::SelectDim() { auto embedx_dim = _config.embedx_dim(); return 1 + embedx_dim; } -size_t SparseAccessor::select_dim_size(size_t dim) { return sizeof(float); } +size_t SparseAccessor::SelectDimSize(size_t dim) { return sizeof(float); } -size_t 
SparseAccessor::select_size() { return select_dim() * sizeof(float); } +size_t SparseAccessor::SelectSize() { return SelectDim() * sizeof(float); } // push value -size_t SparseAccessor::update_dim() { +size_t SparseAccessor::UpdateDim() { auto embedx_dim = _config.embedx_dim(); return 4 + embedx_dim; } -size_t SparseAccessor::update_dim_size(size_t dim) { return sizeof(float); } +size_t SparseAccessor::UpdateDimSize(size_t dim) { return sizeof(float); } -size_t SparseAccessor::update_size() { return update_dim() * sizeof(float); } +size_t SparseAccessor::UpdateSize() { return UpdateDim() * sizeof(float); } -bool SparseAccessor::shrink(float* value) { +bool SparseAccessor::Shrink(float* value) { auto base_threshold = _config.ctr_accessor_param().base_threshold(); auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); auto delete_after_unseen_days = @@ -113,12 +112,12 @@ bool SparseAccessor::shrink(float* value) { auto delete_threshold = _config.ctr_accessor_param().delete_threshold(); // time_decay first - sparse_feature_value.show(value) *= _show_click_decay_rate; - sparse_feature_value.click(value) *= _show_click_decay_rate; + sparse_feature_value.Show(value) *= _show_click_decay_rate; + sparse_feature_value.Click(value) *= _show_click_decay_rate; // shrink after - auto score = show_click_score(sparse_feature_value.show(value), - sparse_feature_value.click(value)); + auto score = show_click_score(sparse_feature_value.Show(value), + sparse_feature_value.Click(value)); auto unseen_days = sparse_feature_value.unseen_days(value); if (score < delete_threshold || unseen_days > delete_after_unseen_days) { return true; @@ -126,7 +125,7 @@ bool SparseAccessor::shrink(float* value) { return false; } -bool SparseAccessor::save(float* value, int param) { +bool SparseAccessor::Save(float* value, int param) { auto base_threshold = _config.ctr_accessor_param().base_threshold(); auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); @@ -142,8 +141,8 @@ bool SparseAccessor::save(float* value, int param) { case 1: // save xbox base case 2: { - if (show_click_score(sparse_feature_value.show(value), - sparse_feature_value.click(value)) >= + if (show_click_score(sparse_feature_value.Show(value), + sparse_feature_value.Click(value)) >= base_threshold && sparse_feature_value.delta_score(value) >= delta_threshold && sparse_feature_value.unseen_days(value) <= delta_keep_days) { @@ -171,7 +170,7 @@ bool SparseAccessor::save(float* value, int param) { } } -void SparseAccessor::update_stat_after_save(float* value, int param) { +void SparseAccessor::UpdateStatAfterSave(float* value, int param) { auto base_threshold = _config.ctr_accessor_param().base_threshold(); auto delta_threshold = _config.ctr_accessor_param().delta_threshold(); auto delta_keep_days = _config.ctr_accessor_param().delta_keep_days(); @@ -180,8 +179,8 @@ void SparseAccessor::update_stat_after_save(float* value, int param) { } switch (param) { case 1: { - if (show_click_score(sparse_feature_value.show(value), - sparse_feature_value.click(value)) >= + if (show_click_score(sparse_feature_value.Show(value), + sparse_feature_value.Click(value)) >= base_threshold && sparse_feature_value.delta_score(value) >= delta_threshold && sparse_feature_value.unseen_days(value) <= delta_keep_days) { @@ -198,48 +197,48 @@ void SparseAccessor::update_stat_after_save(float* value, int param) { } } -int32_t SparseAccessor::create(float** values, size_t num) { +int32_t 
SparseAccessor::Create(float** values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* value = values[value_item]; value[sparse_feature_value.unseen_days_index()] = 0; value[sparse_feature_value.delta_score_index()] = 0; - value[sparse_feature_value.show_index()] = 0; - value[sparse_feature_value.click_index()] = 0; - value[sparse_feature_value.slot_index()] = -1; + value[sparse_feature_value.ShowIndex()] = 0; + value[sparse_feature_value.ClickIndex()] = 0; + value[sparse_feature_value.SlotIndex()] = -1; _embed_sgd_rule->init_value( - value + sparse_feature_value.embed_w_index(), + value + sparse_feature_value.Embed_W_Index(), value + sparse_feature_value.embed_g2sum_index()); _embedx_sgd_rule->init_value( - value + sparse_feature_value.embedx_w_index(), + value + sparse_feature_value.Embedx_W_Index(), value + sparse_feature_value.embedx_g2sum_index(), false); } return 0; } -bool SparseAccessor::need_extend_mf(float* value) { - float show = value[sparse_feature_value.show_index()]; - float click = value[sparse_feature_value.click_index()]; +bool SparseAccessor::NeedExtendMF(float* value) { + float show = value[sparse_feature_value.ShowIndex()]; + float click = value[sparse_feature_value.ClickIndex()]; float score = (show - click) * _config.ctr_accessor_param().nonclk_coeff() + click * _config.ctr_accessor_param().click_coeff(); return score >= _config.embedx_threshold(); } -bool SparseAccessor::has_mf(size_t size) { +bool SparseAccessor::HasMF(size_t size) { return size > sparse_feature_value.embedx_g2sum_index(); } // from SparseFeatureValue to SparsePullValue -int32_t SparseAccessor::select(float** select_values, const float** values, +int32_t SparseAccessor::Select(float** select_values, const float** values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* select_value = select_values[value_item]; const float* value = values[value_item]; - select_value[SparsePullValue::embed_w_index()] = - value[sparse_feature_value.embed_w_index()]; - memcpy(select_value + SparsePullValue::embedx_w_index(), - value + sparse_feature_value.embedx_w_index(), + select_value[SparsePullValue::Embed_W_Index()] = + value[sparse_feature_value.Embed_W_Index()]; + memcpy(select_value + SparsePullValue::Embedx_W_Index(), + value + sparse_feature_value.Embedx_W_Index(), embedx_dim * sizeof(float)); } return 0; @@ -248,15 +247,15 @@ int32_t SparseAccessor::select(float** select_values, const float** values, // from SparsePushValue to SparsePushValue // first dim: item // second dim: field num -int32_t SparseAccessor::merge(float** update_values, +int32_t SparseAccessor::Merge(float** update_values, const float** other_update_values, size_t num) { auto embedx_dim = _config.embedx_dim(); - size_t total_dim = SparsePushValue::dim(embedx_dim); + size_t total_dim = SparsePushValue::Dim(embedx_dim); for (size_t value_item = 0; value_item < num; ++value_item) { float* update_value = update_values[value_item]; const float* other_update_value = other_update_values[value_item]; for (auto i = 0u; i < total_dim; ++i) { - if (i != SparsePushValue::slot_index()) { + if (i != SparsePushValue::SlotIndex()) { update_value[i] += other_update_value[i]; } } @@ -267,43 +266,43 @@ int32_t SparseAccessor::merge(float** update_values, // from SparsePushValue to SparseFeatureValue // first dim: item // second dim: field num -int32_t SparseAccessor::update(float** update_values, const float** 
push_values, +int32_t SparseAccessor::Update(float** update_values, const float** push_values, size_t num) { auto embedx_dim = _config.embedx_dim(); for (size_t value_item = 0; value_item < num; ++value_item) { float* update_value = update_values[value_item]; const float* push_value = push_values[value_item]; - float push_show = push_value[SparsePushValue::show_index()]; - float push_click = push_value[SparsePushValue::click_index()]; - float slot = push_value[SparsePushValue::slot_index()]; - update_value[sparse_feature_value.show_index()] += push_show; - update_value[sparse_feature_value.click_index()] += push_click; - update_value[sparse_feature_value.slot_index()] = slot; + float push_show = push_value[SparsePushValue::ShowIndex()]; + float push_click = push_value[SparsePushValue::ClickIndex()]; + float slot = push_value[SparsePushValue::SlotIndex()]; + update_value[sparse_feature_value.ShowIndex()] += push_show; + update_value[sparse_feature_value.ClickIndex()] += push_click; + update_value[sparse_feature_value.SlotIndex()] = slot; update_value[sparse_feature_value.delta_score_index()] += (push_show - push_click) * _config.ctr_accessor_param().nonclk_coeff() + push_click * _config.ctr_accessor_param().click_coeff(); update_value[sparse_feature_value.unseen_days_index()] = 0; _embed_sgd_rule->update_value( - update_value + sparse_feature_value.embed_w_index(), + update_value + sparse_feature_value.Embed_W_Index(), update_value + sparse_feature_value.embed_g2sum_index(), - push_value + SparsePushValue::embed_g_index()); + push_value + SparsePushValue::Embed_G_Index()); _embedx_sgd_rule->update_value( - update_value + sparse_feature_value.embedx_w_index(), + update_value + sparse_feature_value.Embedx_W_Index(), update_value + sparse_feature_value.embedx_g2sum_index(), - push_value + SparsePushValue::embedx_g_index()); + push_value + SparsePushValue::Embedx_G_Index()); } return 0; } -bool SparseAccessor::create_value(int stage, const float* value) { +bool SparseAccessor::CreateValue(int stage, const float* value) { // stage == 0, pull // stage == 1, push if (stage == 0) { return true; } else if (stage == 1) { // operation - auto show = SparsePushValue::show(const_cast(value)); - auto click = SparsePushValue::click(const_cast(value)); + auto show = SparsePushValue::Show(const_cast(value)); + auto click = SparsePushValue::Click(const_cast(value)); auto score = show_click_score(show, click); if (score <= 0) { return false; @@ -324,34 +323,34 @@ float SparseAccessor::show_click_score(float show, float click) { return (show - click) * nonclk_coeff + click * click_coeff; } -std::string SparseAccessor::parse_to_string(const float* v, int param) { +std::string SparseAccessor::ParseToString(const float* v, int param) { thread_local std::ostringstream os; os.clear(); os.str(""); os << v[0] << " " << v[1] << " " << v[2] << " " << v[3] << " " << v[4] << " " << v[5]; for (int i = sparse_feature_value.embed_g2sum_index(); - i < sparse_feature_value.embedx_w_index(); i++) { + i < sparse_feature_value.Embedx_W_Index(); i++) { os << " " << v[i]; } - auto show = sparse_feature_value.show(const_cast(v)); - auto click = sparse_feature_value.click(const_cast(v)); + auto show = sparse_feature_value.Show(const_cast(v)); + auto click = sparse_feature_value.Click(const_cast(v)); auto score = show_click_score(show, click); if (score >= _config.embedx_threshold() && - param > sparse_feature_value.embedx_w_index()) { - for (auto i = sparse_feature_value.embedx_w_index(); - i < sparse_feature_value.dim(); ++i) { + 
param > sparse_feature_value.Embedx_W_Index()) { + for (auto i = sparse_feature_value.Embedx_W_Index(); + i < sparse_feature_value.Dim(); ++i) { os << " " << v[i]; } } return os.str(); } -int SparseAccessor::parse_from_string(const std::string& str, float* value) { +int SparseAccessor::ParseFromString(const std::string& str, float* value) { int embedx_dim = _config.embedx_dim(); _embedx_sgd_rule->init_value( - value + sparse_feature_value.embedx_w_index(), + value + sparse_feature_value.Embedx_W_Index(), value + sparse_feature_value.embedx_g2sum_index()); auto ret = paddle::string::str_to_float(str.data(), value); CHECK(ret >= 6) << "expect more than 6 real:" << ret; diff --git a/paddle/fluid/distributed/ps/table/sparse_accessor.h b/paddle/fluid/distributed/ps/table/sparse_accessor.h index cdc4c1dc6200e9..b11acff6aaaa3d 100644 --- a/paddle/fluid/distributed/ps/table/sparse_accessor.h +++ b/paddle/fluid/distributed/ps/table/sparse_accessor.h @@ -40,27 +40,27 @@ class SparseAccessor : public ValueAccessor { std::float embedx_g2sum; */ - int dim() { return 6 + embed_sgd_dim + embedx_sgd_dim + embedx_dim; } - int dim_size(size_t dim, int embedx_dim) { return sizeof(float); } - int size() { return dim() * sizeof(float); } - int slot_index() { return 0; } - int unseen_days_index() { return slot_index() + 1; } + int Dim() { return 6 + embed_sgd_dim + embedx_sgd_dim + embedx_dim; } + int DimSize(size_t dim, int embedx_dim) { return sizeof(float); } + int Size() { return Dim() * sizeof(float); } + int SlotIndex() { return 0; } + int unseen_days_index() { return SlotIndex() + 1; } int delta_score_index() { return unseen_days_index() + 1; } - int show_index() { return delta_score_index() + 1; } - int click_index() { return show_index() + 1; } - int embed_w_index() { return click_index() + 1; } - int embed_g2sum_index() { return embed_w_index() + 1; } - int embedx_w_index() { return embed_g2sum_index() + embed_sgd_dim; } - int embedx_g2sum_index() { return embedx_w_index() + embedx_dim; } + int ShowIndex() { return delta_score_index() + 1; } + int ClickIndex() { return ShowIndex() + 1; } + int Embed_W_Index() { return ClickIndex() + 1; } + int embed_g2sum_index() { return Embed_W_Index() + 1; } + int Embedx_W_Index() { return embed_g2sum_index() + embed_sgd_dim; } + int embedx_g2sum_index() { return Embedx_W_Index() + embedx_dim; } float& unseen_days(float* val) { return val[unseen_days_index()]; } float& delta_score(float* val) { return val[delta_score_index()]; } - float& show(float* val) { return val[show_index()]; } - float& click(float* val) { return val[click_index()]; } - float& slot(float* val) { return val[slot_index()]; } - float& embed_w(float* val) { return val[embed_w_index()]; } + float& Show(float* val) { return val[ShowIndex()]; } + float& Click(float* val) { return val[ClickIndex()]; } + float& Slot(float* val) { return val[SlotIndex()]; } + float& EmbedW(float* val) { return val[Embed_W_Index()]; } float& embed_g2sum(float* val) { return val[embed_g2sum_index()]; } - float& embedx_w(float* val) { return val[embedx_w_index()]; } + float& EmbedxW(float* val) { return val[Embedx_W_Index()]; } float& embedx_g2sum(float* val) { return val[embedx_g2sum_index()]; } int embed_sgd_dim; @@ -77,29 +77,25 @@ class SparseAccessor : public ValueAccessor { std::vector embedx_g; */ - static int dim(int embedx_dim) { return 4 + embedx_dim; } - - static int dim_size(int dim, int embedx_dim) { return sizeof(float); } - static int size(int embedx_dim) { return dim(embedx_dim) * sizeof(float); } - static 
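For reference, the renamed index helpers in sparse_accessor.h above imply the following per-feature float layout. The snippet is illustrative only; the dimensions (embed_sgd_dim = 1, embedx_sgd_dim = 1, embedx_dim = 8) are example values, not taken from the patch.

#include <cstdio>

int main() {
  const int embed_sgd_dim = 1, embedx_sgd_dim = 1, embedx_dim = 8;  // assumed
  const int slot = 0;                          // SlotIndex()
  const int unseen_days = slot + 1;            // 1
  const int delta_score = unseen_days + 1;     // 2
  const int show = delta_score + 1;            // 3, ShowIndex()
  const int click = show + 1;                  // 4, ClickIndex()
  const int embed_w = click + 1;               // 5, Embed_W_Index()
  const int embed_g2sum = embed_w + 1;         // 6
  const int embedx_w = embed_g2sum + embed_sgd_dim;   // Embedx_W_Index()
  const int embedx_g2sum = embedx_w + embedx_dim;
  const int dim = 6 + embed_sgd_dim + embedx_sgd_dim + embedx_dim;  // Dim()
  std::printf("embedx_w at %d, embedx_g2sum at %d, total dim %d\n",
              embedx_w, embedx_g2sum, dim);
  return 0;
}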
int slot_index() { return 0; } - static int show_index() { return SparsePushValue::slot_index() + 1; } - static int click_index() { return SparsePushValue::show_index() + 1; } - static int embed_g_index() { return SparsePushValue::click_index() + 1; } - static int embedx_g_index() { return SparsePushValue::embed_g_index() + 1; } - static float& slot(float* val) { - return val[SparsePushValue::slot_index()]; + static int Dim(int embedx_dim) { return 4 + embedx_dim; } + + static int DimSize(int dim, int embedx_dim) { return sizeof(float); } + static int Size(int embedx_dim) { return Dim(embedx_dim) * sizeof(float); } + static int SlotIndex() { return 0; } + static int ShowIndex() { return SparsePushValue::SlotIndex() + 1; } + static int ClickIndex() { return SparsePushValue::ShowIndex() + 1; } + static int Embed_G_Index() { return SparsePushValue::ClickIndex() + 1; } + static int Embedx_G_Index() { return SparsePushValue::Embed_G_Index() + 1; } + static float& Slot(float* val) { return val[SparsePushValue::SlotIndex()]; } + static float& Show(float* val) { return val[SparsePushValue::ShowIndex()]; } + static float& Click(float* val) { + return val[SparsePushValue::ClickIndex()]; } - static float& show(float* val) { - return val[SparsePushValue::show_index()]; + static float& EmbedG(float* val) { + return val[SparsePushValue::Embed_G_Index()]; } - static float& click(float* val) { - return val[SparsePushValue::click_index()]; - } - static float& embed_g(float* val) { - return val[SparsePushValue::embed_g_index()]; - } - static float* embedx_g(float* val) { - return val + SparsePushValue::embedx_g_index(); + static float* EmbedxG(float* val) { + return val + SparsePushValue::Embedx_G_Index(); } }; @@ -109,82 +105,82 @@ class SparseAccessor : public ValueAccessor { std::vector embedx_w; */ - static int dim(int embedx_dim) { return 1 + embedx_dim; } - static int dim_size(size_t dim) { return sizeof(float); } - static int size(int embedx_dim) { return dim(embedx_dim) * sizeof(float); } - static int embed_w_index() { return 0; } - static int embedx_w_index() { return 1; } - static float& embed_w(float* val) { - return val[SparsePullValue::embed_w_index()]; + static int Dim(int embedx_dim) { return 1 + embedx_dim; } + static int DimSize(size_t dim) { return sizeof(float); } + static int Size(int embedx_dim) { return Dim(embedx_dim) * sizeof(float); } + static int Embed_W_Index() { return 0; } + static int Embedx_W_Index() { return 1; } + static float& EmbedW(float* val) { + return val[SparsePullValue::Embed_W_Index()]; } - static float* embedx_w(float* val) { - return val + SparsePullValue::embedx_w_index(); + static float* EmbedxW(float* val) { + return val + SparsePullValue::Embedx_W_Index(); } }; SparseAccessor() {} - virtual int initialize(); + virtual int Initialize(); virtual void SetTableInfo(AccessorInfo& info); virtual size_t GetTableInfo(InfoKey key); virtual ~SparseAccessor() {} // value维度 - virtual size_t dim(); + size_t Dim(); // value各个维度的size - virtual size_t dim_size(size_t dim); + size_t DimSize(size_t dim); // value各维度相加总size - virtual size_t size(); + size_t Size(); // value中mf动态长度部分总size大小, sparse下生效 - virtual size_t mf_size(); + size_t MFSize(); // pull value维度 - virtual size_t select_dim(); + size_t SelectDim(); // pull value各个维度的size - virtual size_t select_dim_size(size_t dim); + size_t SelectDimSize(size_t dim); // pull value各维度相加总size - virtual size_t select_size(); + size_t SelectSize(); // push value维度 - virtual size_t update_dim(); + size_t UpdateDim(); // push 
value各个维度的size - virtual size_t update_dim_size(size_t dim); + size_t UpdateDimSize(size_t dim); // push value各维度相加总size - virtual size_t update_size(); + size_t UpdateSize(); // 判断该value是否进行shrink - virtual bool shrink(float* value); + virtual bool Shrink(float* value); // 判断该value是否保存到ssd // virtual bool save_ssd(float* value); - virtual bool need_extend_mf(float* value); - virtual bool has_mf(size_t size); + virtual bool NeedExtendMF(float* value); + virtual bool HasMF(size_t size); // 判断该value是否在save阶段dump, // param作为参数用于标识save阶段,如downpour的xbox与batch_model // param = 0, save all feature // param = 1, save delta feature // param = 2, save xbox base feature - bool save(float* value, int param) override; + bool Save(float* value, int param) override; // update delta_score and unseen_days after save - void update_stat_after_save(float* value, int param) override; + void UpdateStatAfterSave(float* value, int param) override; // keys不存在时,为values生成随机值 // 要求value的内存由外部调用者分配完毕 - virtual int32_t create(float** value, size_t num); + virtual int32_t Create(float** value, size_t num); // 从values中选取到select_values中 - virtual int32_t select(float** select_values, const float** values, + virtual int32_t Select(float** select_values, const float** values, size_t num); // 将update_values聚合到一起 - virtual int32_t merge(float** update_values, + virtual int32_t Merge(float** update_values, const float** other_update_values, size_t num); // 将update_values聚合到一起,通过it.next判定是否进入下一个key - // virtual int32_t merge(float** update_values, iterator it); + // virtual int32_t Merge(float** update_values, iterator it); // 将update_values更新应用到values中 - virtual int32_t update(float** values, const float** update_values, + virtual int32_t Update(float** values, const float** update_values, size_t num); - std::string parse_to_string(const float* value, int param) override; - int32_t parse_from_string(const std::string& str, float* v) override; - virtual bool create_value(int type, const float* value); + std::string ParseToString(const float* value, int param) override; + int32_t ParseFromString(const std::string& str, float* v) override; + virtual bool CreateValue(int type, const float* value); // 这个接口目前只用来取show - float get_field(float* value, const std::string& name) override { + float GetField(float* value, const std::string& name) override { // CHECK(name == "show"); if (name == "show") { - return sparse_feature_value.show(value); + return sparse_feature_value.Show(value); } return 0.0; } diff --git a/paddle/fluid/distributed/ps/table/table.cc b/paddle/fluid/distributed/ps/table/table.cc index 6faa3e2632e28c..99790606f0b31b 100644 --- a/paddle/fluid/distributed/ps/table/table.cc +++ b/paddle/fluid/distributed/ps/table/table.cc @@ -97,7 +97,7 @@ int32_t Table::initialize_accessor() { << ", accessor_name:" << _config.accessor().accessor_class(); return -1; } - if (accessor->configure(_config.accessor()) || accessor->initialize() != 0) { + if (accessor->Configure(_config.accessor()) || accessor->Initialize() != 0) { LOG(ERROR) << " accessor initialize failed, table_id:" << _config.table_id() << ", accessor_name:" << _config.accessor().accessor_class(); return -1; diff --git a/paddle/fluid/distributed/ps/table/tensor_accessor.cc b/paddle/fluid/distributed/ps/table/tensor_accessor.cc index 77014141783c39..43b791b6ac03b2 100644 --- a/paddle/fluid/distributed/ps/table/tensor_accessor.cc +++ b/paddle/fluid/distributed/ps/table/tensor_accessor.cc @@ -18,86 +18,70 @@ namespace paddle { namespace distributed { -int 
CommMergeAccessor::initialize() { return 0; } +int CommMergeAccessor::Initialize() { return 0; } void CommMergeAccessor::SetTableInfo(AccessorInfo &info) { - info.dim = dim(); - info.size = size(); - info.select_dim = select_dim(); - info.select_size = select_size(); - info.update_dim = update_dim(); - info.update_size = update_size(); - info.mf_size = mf_size(); + info.select_dim = SelectDim(); + info.select_size = SelectSize(); + info.update_dim = UpdateDim(); + info.update_size = UpdateSize(); info.fea_dim = fea_dim(); } size_t CommMergeAccessor::GetTableInfo(InfoKey key) { switch (key) { - case DIM: - return dim(); - case SIZE: - return size(); case SELECT_DIM: - return select_dim(); + return SelectDim(); case SELECT_SIZE: - return select_size(); + return SelectSize(); case UPDATE_DIM: - return update_dim(); + return UpdateDim(); case UPDATE_SIZE: - return update_size(); - case MF_SIZE: - return mf_size(); + return UpdateSize(); case FEA_DIM: return fea_dim(); + default: + return 0; } return 0; } -// value 维度 -size_t CommMergeAccessor::dim() { return 0; } - -// value 各个维度的size -size_t CommMergeAccessor::dim_size(size_t dim) { return 0; } - -// value 各维度相加总size -size_t CommMergeAccessor::size() { return 0; } - // pull value 维度 -size_t CommMergeAccessor::select_dim() { return _config.embedx_dim(); } +size_t CommMergeAccessor::SelectDim() { return _config.embedx_dim(); } // pull value 各个维度的size -size_t CommMergeAccessor::select_dim_size(size_t dim) { return sizeof(float); } +size_t CommMergeAccessor::SelectDimSize(size_t dim) { return sizeof(float); } // pull value 各维度相加总size -size_t CommMergeAccessor::select_size() { return select_dim() * sizeof(float); } +size_t CommMergeAccessor::SelectSize() { return SelectDim() * sizeof(float); } // push value 维度 -size_t CommMergeAccessor::update_dim() { return _config.embedx_dim(); } +size_t CommMergeAccessor::UpdateDim() { return _config.embedx_dim(); } // push value 各个维度的size -size_t CommMergeAccessor::update_dim_size(size_t dim) { return sizeof(float); } +size_t CommMergeAccessor::UpdateDimSize(size_t dim) { return sizeof(float); } // push value 各维度相加总size -size_t CommMergeAccessor::update_size() { return update_dim() * sizeof(float); } +size_t CommMergeAccessor::UpdateSize() { return UpdateDim() * sizeof(float); } // 判断该value 是否进行shrink -bool CommMergeAccessor::shrink(float * /*value*/) { return false; } +bool CommMergeAccessor::Shrink(float * /*value*/) { return false; } // 判断该value 是否在save阶段dump, // param作为参数用于标识save阶段,如downpour的xbox与batch_model -bool CommMergeAccessor::save(float * /*value*/, int /*param*/) { return true; } +bool CommMergeAccessor::Save(float * /*value*/, int /*param*/) { return true; } // keys不存在时,为values生成随机值 -int32_t CommMergeAccessor::create(float **value, size_t num) { return 0; } +int32_t CommMergeAccessor::Create(float **value, size_t num) { return 0; } // 从values中选取到select_values中 -int32_t CommMergeAccessor::select(float **select_values, const float **values, +int32_t CommMergeAccessor::Select(float **select_values, const float **values, size_t num) { return 0; } // 将update_values聚合到一起 -int32_t CommMergeAccessor::merge(float **update_values, +int32_t CommMergeAccessor::Merge(float **update_values, const float **other_update_values, size_t num) { Eigen::Map u_mat(update_values[0], 1, num); @@ -109,13 +93,13 @@ int32_t CommMergeAccessor::merge(float **update_values, // 将update_values聚合到一起,通过it.next判定是否进入下一个key // int32_t merge(float** update_values, iterator it); // 将update_values更新应用到values中 -int32_t 
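With the per-dimension getters removed from CommMergeAccessor above, callers query sizes through GetTableInfo(InfoKey) instead, as the updated tests further below do with UPDATE_DIM. A hedged, uninstantiated sketch of that calling convention (the helper name and template are illustrative):

// Assumes an accessor type exposing GetTableInfo(key), e.g. CommMergeAccessor.
template <typename Accessor, typename Key>
size_t QueryAccessorInfo(Accessor* acc, Key key) {
  return acc->GetTableInfo(key);  // e.g. key = UPDATE_DIM or SELECT_SIZE
}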
CommMergeAccessor::update(float **values, const float **update_values, +int32_t CommMergeAccessor::Update(float **values, const float **update_values, size_t num) { return 0; } -int CommMergeAccessor::set_weight(float **values, const float **update_values, - size_t num) { +int CommMergeAccessor::SetWeight(float **values, const float **update_values, + size_t num) { return 0; } diff --git a/paddle/fluid/distributed/ps/table/tensor_accessor.h b/paddle/fluid/distributed/ps/table/tensor_accessor.h index 6f5b69a392bc58..1b454fe0c734be 100644 --- a/paddle/fluid/distributed/ps/table/tensor_accessor.h +++ b/paddle/fluid/distributed/ps/table/tensor_accessor.h @@ -29,53 +29,49 @@ class CommMergeAccessor : public ValueAccessor { public: CommMergeAccessor() {} virtual ~CommMergeAccessor() {} - virtual int initialize(); + virtual int Initialize(); virtual void SetTableInfo(AccessorInfo &info); virtual size_t GetTableInfo(InfoKey key); // value维度 - virtual size_t dim(); - // value各个维度的size - virtual size_t dim_size(size_t dim); - // value各维度相加总size - virtual size_t size(); // pull value维度 - virtual size_t select_dim(); + size_t SelectDim(); // pull value各个维度的size - virtual size_t select_dim_size(size_t dim); + size_t SelectDimSize(size_t dim); // pull value各维度相加总size - virtual size_t select_size(); + size_t SelectSize(); // push value维度 - virtual size_t update_dim(); + size_t UpdateDim(); // push value各个维度的size - virtual size_t update_dim_size(size_t dim); + size_t UpdateDimSize(size_t dim); // push value各维度相加总size - virtual size_t update_size(); + size_t UpdateSize(); + size_t fea_dim() { return _config.fea_dim(); } // 判断该value是否进行shrink - virtual bool shrink(float * /*value*/); + virtual bool Shrink(float * /*value*/); // 判断该value是否在save阶段dump, // param作为参数用于标识save阶段,如downpour的xbox与batch_model - virtual bool save(float * /*value*/, int /*param*/); + virtual bool Save(float * /*value*/, int /*param*/); // keys不存在时,为values生成随机值 - virtual int32_t create(float **value, size_t num); + virtual int32_t Create(float **value, size_t num); // 从values中选取到select_values中 - virtual int32_t select(float **select_values, const float **values, + virtual int32_t Select(float **select_values, const float **values, size_t num); // 将update_values聚合到一起 - virtual int32_t merge(float **update_values, + virtual int32_t Merge(float **update_values, const float **other_update_values, size_t num); // 将update_values聚合到一起,通过it.next判定是否进入下一个key - // virtual int32_t merge(float** update_values, iterator it); + // virtual int32_t Merge(float** update_values, iterator it); // 将update_values更新应用到values中 - virtual int32_t update(float **values, const float **update_values, + virtual int32_t Update(float **values, const float **update_values, size_t num); - virtual int set_weight(float **values, const float **update_values, - size_t num); - virtual std::string parse_to_string(const float *value, int param) { + virtual int SetWeight(float **values, const float **update_values, + size_t num); + virtual std::string ParseToString(const float *value, int param) { return ""; } - virtual int parse_from_string(const std::string &str, float *v) { return 0; } + virtual int ParseFromString(const std::string &str, float *v) { return 0; } }; } // namespace distributed } // namespace paddle diff --git a/paddle/fluid/distributed/test/ctr_accessor_test.cc b/paddle/fluid/distributed/test/ctr_accessor_test.cc index 835b1a361573d4..8d9d0abd2394ce 100644 --- a/paddle/fluid/distributed/test/ctr_accessor_test.cc +++ 
b/paddle/fluid/distributed/test/ctr_accessor_test.cc @@ -67,49 +67,49 @@ TableAccessorParameter gen_param() { TEST(downpour_feature_value_accessor_test, test_shrink) { TableAccessorParameter parameter = gen_param(); CtrCommonAccessor* acc = new CtrCommonAccessor(); - ASSERT_EQ(acc->configure(parameter), 0); - ASSERT_EQ(acc->initialize(), 0); + ASSERT_EQ(acc->Configure(parameter), 0); + ASSERT_EQ(acc->Initialize(), 0); VLOG(3) << "size of struct: " << acc->common_feature_value.embed_sgd_dim << " " << acc->common_feature_value.embedx_dim << " " << acc->common_feature_value.embedx_sgd_dim << " " - << acc->common_feature_value.dim() << "\n"; + << acc->common_feature_value.Dim() << "\n"; - float* value = new float[acc->dim()]; - for (auto i = 0u; i < acc->dim(); ++i) { + float* value = new float[acc->Dim()]; + for (auto i = 0u; i < acc->Dim(); ++i) { value[i] = i * 1.0; } - ASSERT_TRUE(!acc->shrink(value)); + ASSERT_TRUE(!acc->Shrink(value)); // set unseen_days too long value[1] = 1000; // set delta score too small value[2] = 0.001; - ASSERT_TRUE(acc->shrink(value)); + ASSERT_TRUE(acc->Shrink(value)); } TEST(downpour_feature_value_accessor_test, test_save) { TableAccessorParameter parameter = gen_param(); CtrCommonAccessor* acc = new CtrCommonAccessor(); - ASSERT_EQ(acc->configure(parameter), 0); - ASSERT_EQ(acc->initialize(), 0); + ASSERT_EQ(acc->Configure(parameter), 0); + ASSERT_EQ(acc->Initialize(), 0); - float* value = new float[acc->dim()]; - for (auto i = 0u; i < acc->dim(); ++i) { + float* value = new float[acc->Dim()]; + for (auto i = 0u; i < acc->Dim(); ++i) { value[i] = i * 1.0; } // save all feature - ASSERT_TRUE(acc->save(value, 0)); + ASSERT_TRUE(acc->Save(value, 0)); // save delta feature - ASSERT_TRUE(acc->save(value, 1)); + ASSERT_TRUE(acc->Save(value, 1)); // save base feature with time decay - ASSERT_TRUE(acc->save(value, 2)); + ASSERT_TRUE(acc->Save(value, 2)); VLOG(3) << "test_save:"; - for (auto i = 0u; i < acc->dim(); ++i) { + for (auto i = 0u; i < acc->Dim(); ++i) { VLOG(3) << value[i]; } } @@ -117,8 +117,8 @@ TEST(downpour_feature_value_accessor_test, test_save) { TEST(downpour_feature_value_accessor_test, test_create) { TableAccessorParameter parameter = gen_param(); CtrCommonAccessor* acc = new CtrCommonAccessor(); - ASSERT_EQ(acc->configure(parameter), 0); - ASSERT_EQ(acc->initialize(), 0); + ASSERT_EQ(acc->Configure(parameter), 0); + ASSERT_EQ(acc->Initialize(), 0); const int field_size = 7 + 8; const int item_size = 10; @@ -127,7 +127,7 @@ TEST(downpour_feature_value_accessor_test, test_create) { for (auto i = 0u; i < item_size; ++i) { value[i] = new float[field_size]; } - ASSERT_EQ(acc->create(value, item_size), 0); + ASSERT_EQ(acc->Create(value, item_size), 0); for (auto i = 0u; i < item_size; ++i) { for (auto j = 0u; j < field_size; ++j) { @@ -141,11 +141,11 @@ TEST(downpour_feature_value_accessor_test, test_create) { TEST(downpour_feature_value_accessor_test, test_update) { TableAccessorParameter parameter = gen_param(); CtrCommonAccessor* acc = new CtrCommonAccessor(); - ASSERT_EQ(acc->configure(parameter), 0); - ASSERT_EQ(acc->initialize(), 0); + ASSERT_EQ(acc->Configure(parameter), 0); + ASSERT_EQ(acc->Initialize(), 0); - VLOG(3) << "dim: " << acc->common_feature_value.dim() << "\n"; - VLOG(3) << "update_dim: " << acc->update_dim() << "\n"; + VLOG(3) << "dim: " << acc->common_feature_value.Dim() << "\n"; + VLOG(3) << "update_dim: " << acc->GetTableInfo(UPDATE_DIM) << "\n"; const int field_size = 7 + 8; const int item_size = 10; @@ -162,8 +162,8 @@ 
TEST(downpour_feature_value_accessor_test, test_update) { typedef const float* const_float_ptr; const_float_ptr* grad = new const_float_ptr[item_size]; for (auto i = 0u; i < item_size; ++i) { - float* p = new float[acc->update_dim()]; - for (auto j = 0u; j < acc->update_dim(); ++j) { + float* p = new float[acc->GetTableInfo(UPDATE_DIM)]; + for (auto j = 0u; j < acc->GetTableInfo(UPDATE_DIM); ++j) { p[j] = i; } grad[i] = p; @@ -251,14 +251,14 @@ TEST(downpour_feature_value_accessor_test, test_update) { acc->_embedx_sgd_rule->update_value(&v.embedx_w[0], &v.embedx_g2sum[0], &push_v.embedx_g[0]); - float* ptr = new float[acc->dim()]; + float* ptr = new float[acc->Dim()]; v.to_array(ptr, parameter.embedx_dim()); exp_value.push_back(ptr); } - acc->update(value, grad, item_size); + acc->Update(value, grad, item_size); for (auto i = 0u; i < item_size; ++i) { - for (auto j = 0u; j < acc->dim(); ++j) { + for (auto j = 0u; j < acc->Dim(); ++j) { VLOG(3) << value[i][j] << ":" << exp_value[i][j] << " "; ASSERT_FLOAT_EQ(value[i][j], exp_value[i][j]); } @@ -268,8 +268,8 @@ TEST(downpour_feature_value_accessor_test, test_update) { TEST(downpour_feature_value_accessor_test, test_show_click_score) { TableAccessorParameter parameter = gen_param(); CtrCommonAccessor* acc = new CtrCommonAccessor(); - ASSERT_EQ(acc->configure(parameter), 0); - ASSERT_EQ(acc->initialize(), 0); + ASSERT_EQ(acc->Configure(parameter), 0); + ASSERT_EQ(acc->Initialize(), 0); float show = 10; float click = 6; @@ -279,8 +279,8 @@ TEST(downpour_feature_value_accessor_test, test_show_click_score) { TEST(downpour_feature_value_accessor_test, test_string_related) { TableAccessorParameter parameter = gen_param(); CtrCommonAccessor* acc = new CtrCommonAccessor(); - ASSERT_EQ(acc->configure(parameter), 0); - ASSERT_EQ(acc->initialize(), 0); + ASSERT_EQ(acc->Configure(parameter), 0); + ASSERT_EQ(acc->Initialize(), 0); const int field_size = 15; float* value = new float[field_size]; @@ -288,12 +288,12 @@ TEST(downpour_feature_value_accessor_test, test_string_related) { value[i] = i; } - auto str = acc->parse_to_string(value, 0); + auto str = acc->ParseToString(value, 0); VLOG(3) << str << std::endl; str = "0 1 2 3 4 5 6"; - ASSERT_NE(acc->parse_from_string(str, value), 0); + ASSERT_NE(acc->ParseFromString(str, value), 0); // make sure init_zero=true for (auto i = 7; i < 15; ++i) {