diff --git a/_typos.toml b/_typos.toml
index c22b667ae35c0..8ff15c1751e78 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -47,13 +47,6 @@ beacuse = 'beacuse'
 becasue = 'becasue'
 Becasue = 'Becasue'
 becuase = 'becuase'
-befor = 'befor'
-befores = 'befores'
-begining = 'begining'
-benfit = 'benfit'
-Betweent = 'Betweent'
-betweeen = 'betweeen'
-bindins = 'bindins'
 blokc = 'blokc'
 blcok = 'blcok'
 bootom = 'bootom'
diff --git a/paddle/cinn/hlir/dialect/operator/ir/generate_shape_util.cc b/paddle/cinn/hlir/dialect/operator/ir/generate_shape_util.cc
index fae4b0ccf8536..1844a0e7ed661 100644
--- a/paddle/cinn/hlir/dialect/operator/ir/generate_shape_util.cc
+++ b/paddle/cinn/hlir/dialect/operator/ir/generate_shape_util.cc
@@ -376,12 +376,12 @@ MakeGetterDimExpr4SymbolName(
     const std::function& DimExpr4InputDim) {
   std::unordered_map>
-      symbol_name2symbol_bindins{};
+      symbol_name2symbol_bindings{};
   for (const auto& symbol_binding : symbol_bindings) {
-    symbol_name2symbol_bindins[GetSymbolNameBySymbolBinding(symbol_binding)]
+    symbol_name2symbol_bindings[GetSymbolNameBySymbolBinding(symbol_binding)]
         .emplace_back(symbol_binding);
   }
-  return [map = std::move(symbol_name2symbol_bindins), DimExpr4InputDim](
+  return [map = std::move(symbol_name2symbol_bindings), DimExpr4InputDim](
             const std::string& symbol_name) -> std::optional {
     const auto& iter = map.find(symbol_name);
     if (iter == map.end()) return std::nullopt;
diff --git a/paddle/cinn/optim/lower_function_call_bind_vars.cc b/paddle/cinn/optim/lower_function_call_bind_vars.cc
index 81f8c311e95ed..581e97744a2ba 100644
--- a/paddle/cinn/optim/lower_function_call_bind_vars.cc
+++ b/paddle/cinn/optim/lower_function_call_bind_vars.cc
@@ -55,7 +55,7 @@ struct LowerFunctionCallBindVarsMutator : public ir::IRMutator<> {
       }
     }
 
-    // insert the extra var arguments to the begining of the original call's
+    // insert the extra var arguments to the beginning of the original call's
     // argument list.
     node->read_args.insert(std::begin(op->read_args),
                            extra_var_args.begin(),
diff --git a/paddle/fluid/eager/general_grad.h b/paddle/fluid/eager/general_grad.h
index b6cd2c3f9e4c4..eaaa46c2f5852 100644
--- a/paddle/fluid/eager/general_grad.h
+++ b/paddle/fluid/eager/general_grad.h
@@ -592,7 +592,7 @@ class GeneralGrad {
     // Purify potentialstartup_ops, remove those nodes that are the same as
     // input_target_nodes
     PurifyPotentialStartUpNodes();
-    // Get Graph Info Betweent input target gradnode and outputs
+    // Get Graph Info Between input target gradnode and outputs
    // Record the depending_nodes_ and potential_startup_nodes_
     GetGraphInfoBetweenTargets(*queue);
     // Update Graph Info, remove some nodes in
diff --git a/paddle/fluid/framework/data_feed.cu b/paddle/fluid/framework/data_feed.cu
index 91b91b4f7a2fa..8ddf8e751fb9e 100644
--- a/paddle/fluid/framework/data_feed.cu
+++ b/paddle/fluid/framework/data_feed.cu
@@ -4182,7 +4182,7 @@ void GraphDataGenerator::DoSageForInfer() {
       total_instance = 2;
       d_type_keys = reinterpret_cast(
           d_device_keys_[tensor_pair_idx][infer_cursor_[tensor_pair_idx]]
-              ->ptr());  // copy from begining
+              ->ptr());  // copy from beginning
     } else {
       d_type_keys += infer_node_start_[tensor_pair_idx];
       infer_node_start_[tensor_pair_idx] += total_instance / 2;
diff --git a/paddle/fluid/framework/fleet/ps_gpu_wrapper.cc b/paddle/fluid/framework/fleet/ps_gpu_wrapper.cc
index 379df91f29676..2e6df061da17c 100644
--- a/paddle/fluid/framework/fleet/ps_gpu_wrapper.cc
+++ b/paddle/fluid/framework/fleet/ps_gpu_wrapper.cc
@@ -2152,7 +2152,7 @@ void PSGPUWrapper::BeginPass() {
         common::errors::Fatal("[BeginPass] current task is not ended."));
   }
 
-  debug_gpu_memory_info("befor build task");
+  debug_gpu_memory_info("before build task");
   build_task();
   debug_gpu_memory_info("after build task");
   timer.Pause();
diff --git a/paddle/fluid/framework/new_executor/interpreter/dependency_builder.cc b/paddle/fluid/framework/new_executor/interpreter/dependency_builder.cc
index 85a2810ddee84..ce11504ef0a3b 100644
--- a/paddle/fluid/framework/new_executor/interpreter/dependency_builder.cc
+++ b/paddle/fluid/framework/new_executor/interpreter/dependency_builder.cc
@@ -783,10 +783,10 @@ void DependencyBuilderSimplify::GetAllbehind() {
     }
   };
   for (size_t i = start_index_; i < op_num_; i++) {
-    auto& behinds = ops_behind_[i];
-    auto& befores = ops_before_[i];
-    for (auto before_op : befores) {
-      for (auto behind_op : behinds) {
+    auto& behind_ops = ops_behind_[i];
+    auto& before_ops = ops_before_[i];
+    for (auto before_op : before_ops) {
+      for (auto behind_op : behind_ops) {
         update_op_happen_before(before_op, behind_op);
       }
     }
diff --git a/paddle/fluid/inference/tensorrt/convert/tile_op.cc b/paddle/fluid/inference/tensorrt/convert/tile_op.cc
index c443c31c113fa..667386b11bd5b 100644
--- a/paddle/fluid/inference/tensorrt/convert/tile_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/tile_op.cc
@@ -78,7 +78,7 @@ class TileOpConverter : public OpConverter {
      itensors.push_back(input_shape_tensor);
      input_shape_tensor = Concat(itensors);
      // need reshape input to more dims.
-      input = Reshape(input, input_shape_tensor, "reshape_input_befor_slice");
+      input = Reshape(input, input_shape_tensor, "reshape_input_before_slice");
       repeat_expand_tensor = repeat_tensor;
     } else {
       repeat_expand_tensor = repeat_tensor;
diff --git a/paddle/fluid/pir/transforms/onednn/cpu_bfloat16_squash_pass.cc b/paddle/fluid/pir/transforms/onednn/cpu_bfloat16_squash_pass.cc
index 786611019e343..7c5909fbe9583 100644
--- a/paddle/fluid/pir/transforms/onednn/cpu_bfloat16_squash_pass.cc
+++ b/paddle/fluid/pir/transforms/onednn/cpu_bfloat16_squash_pass.cc
@@ -396,20 +396,20 @@ class CPUBf16QuantizeSquashPass : public pir::PatternRewritePass {
   pir::RewritePatternSet InitializePatterns(pir::IrContext *context) override {
     pir::RewritePatternSet ps(context);
-    uint32_t benfit = 100;
+    uint32_t benefit = 100;
 
     auto q_dq_onednn_pattern = std::make_unique(
-        context, benfit--, std::vector{});
+        context, benefit--, std::vector{});
     ps.Add(std::move(q_dq_onednn_pattern));
 
     auto q_dq_multi_onednn_pattern = std::make_unique(
-        context, benfit--, std::vector{});
+        context, benefit--, std::vector{});
     ps.Add(std::move(q_dq_multi_onednn_pattern));
 
     auto q_conv_onednn_pattern = std::make_unique(
         context,
-        benfit--,
+        benefit--,
         std::vector{
             paddle::onednn::dialect::FusedConv2dOp::name(),
         });
@@ -418,14 +418,14 @@ class CPUBf16QuantizeSquashPass : public pir::PatternRewritePass {
     auto q_fusedconv_onednn_pattern = std::make_unique(
         context,
-        benfit--,
+        benefit--,
         std::vector{
             paddle::onednn::dialect::FusedConv2dOp::name(),
         });
     ps.Add(std::move(q_fusedconv_onednn_pattern));
 
     auto op_dq_onednn_pattern = std::make_unique(
-        context, benfit--, std::vector{});
+        context, benefit--, std::vector{});
     ps.Add(std::move(op_dq_onednn_pattern));
 
     return ps;
diff --git a/python/paddle/distributed/auto_parallel/static/completion.py b/python/paddle/distributed/auto_parallel/static/completion.py
index e8579131a27c5..0fb06082d3afa 100644
--- a/python/paddle/distributed/auto_parallel/static/completion.py
+++ b/python/paddle/distributed/auto_parallel/static/completion.py
@@ -1224,7 +1224,7 @@ def set_process_mesh(block, op, process_mesh, var_to_process_mesh):
             end_op_index = i
             break
 
-    # all ops betweeen start_op_index and end_op_index should not be ignored
+    # all ops between start_op_index and end_op_index should not be ignored
     for i in range(start_op_index, end_op_index + 1):
         struct_name = ops[i].struct_name
         m = regex.search(struct_name)
diff --git a/test/legacy_test/test_array_read_write_op.py b/test/legacy_test/test_array_read_write_op.py
index a749fa721e7e6..77dfc3d68d00e 100644
--- a/test/legacy_test/test_array_read_write_op.py
+++ b/test/legacy_test/test_array_read_write_op.py
@@ -244,7 +244,7 @@ def test_array_backward(self):
             feed={'d0': d},
             fetch_list=[mean.name, d0.grad_name, mem_array.grad_name],
         )
-        # this ans is wrong array is empty at begining ,so it no grad.
+        # this answer is wrong: the array is empty at the beginning, so it has no grad.
         np.testing.assert_allclose(res[2], [[0.1] * 10], rtol=1e-05)
 
         mean = 0.6097253
diff --git a/tools/check_file_diff_approvals.sh b/tools/check_file_diff_approvals.sh
index b370b5253a6ce..3c0a22a78efc1 100644
--- a/tools/check_file_diff_approvals.sh
+++ b/tools/check_file_diff_approvals.sh
@@ -182,7 +182,7 @@ fi
 
 HAS_MODIFIED_API_FW_BW_YAML=`git diff --name-only upstream/$BRANCH | grep -E "paddle/phi/ops/yaml/ops.yaml|paddle/phi/ops/yaml/backward.yaml" || true`
 if [ "${HAS_MODIFIED_API_FW_BW_YAML}" != "" ] && [ "${GIT_PR_ID}" != "" ]; then
-    echo_line="You must be approved by zyfncg or heavyrain-lzy for paddle/phi/ops/yaml/ops.yaml or paddle/phi/ops/yaml/backward.yaml changes, which manage the generated code for the C++ OP. You can only change them according to the specification at the begining of this two file.\n Recommend you obtain approval from gongshaotian or Hongqing-work, if only modified the InferSymbolicShapeInterface interfaces in the YAML file.\n"
+    echo_line="You must be approved by zyfncg or heavyrain-lzy for paddle/phi/ops/yaml/ops.yaml or paddle/phi/ops/yaml/backward.yaml changes, which manage the generated code for the C++ OP. You can only change them according to the specification at the beginning of these two files.\n We recommend you obtain approval from gongshaotian or Hongqing-work if you only modified the InferSymbolicShapeInterface interfaces in the YAML file.\n"
     check_approval 1 zyfncg heavyrain-lzy gongshaotian Hongqing-work
 fi