diff --git a/_typos.toml b/_typos.toml
index 1f913a25de8ad6..b331be2575b27a 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -80,18 +80,6 @@ framwork = 'framwork'
 frequence = 'frequence'
 fron = 'fron'
 fullfill = 'fullfill'
-identiy = 'identiy'
-indentify = 'indentify'
-implemention = 'implemention'
-implementated = 'implementated'
-implementd = 'implementd'
-impliment = 'impliment'
-implment = 'implment'
-implicitely = 'implicitely'
-Inproper = 'Inproper'
-inconsistence = 'inconsistence'
-uncorrectly = 'uncorrectly'
-Indext = 'Indext'
 Indexs = 'Indexs'
 indexs = 'indexs'
 indiates = 'indiates'
diff --git a/paddle/fluid/framework/fleet/heter_ps/heter_comm_kernel.cu b/paddle/fluid/framework/fleet/heter_ps/heter_comm_kernel.cu
index eed2f4f8f5c9af..c29d4419e7a3de 100644
--- a/paddle/fluid/framework/fleet/heter_ps/heter_comm_kernel.cu
+++ b/paddle/fluid/framework/fleet/heter_ps/heter_comm_kernel.cu
@@ -335,7 +335,7 @@ __global__ void scatter_dvals_by_unit_kernel(TUnit* d_dest_vals,
   }
 }
 
-// cuda implemention of heter_comm_kernel.h
+// CUDA implementation of heter_comm_kernel.h
 template <typename T, typename StreamType>
 void HeterCommKernel::fill_idx(T* idx,
                                int64_t len,
diff --git a/paddle/phi/api/generator/dist_api_gen.py b/paddle/phi/api/generator/dist_api_gen.py
index 297959a6385a30..7a9783a1ebed8c 100644
--- a/paddle/phi/api/generator/dist_api_gen.py
+++ b/paddle/phi/api/generator/dist_api_gen.py
@@ -458,7 +458,7 @@
 """
 
 # TODO(GhostScreaming): Some operators generate shape info in runtime,
-# bincount. As a result, dist_output's global shape is set uncorrectly,
+# bincount. As a result, dist_output's global shape is set incorrectly,
 # because it's generated in InferMeta function. A temporally solution is
 # use black op list to set DistTensor shape extra.
 SINGLE_SET_DIST_OUT_DIMS = """
@@ -615,7 +615,7 @@ def parse_infer_meta(self, infer_meta_config):
         infer_meta['local_shape'] = None
         # Inplace op that changes shape should not change its global shape
         # in inferMeta, otherwise, it may fails in reshard pass because of
-        # the inconsistence of dist_atttr and shape.
+        # the inconsistency of dist_attr and shape.
         if 'global_shape' not in infer_meta_config:
             infer_meta['global_shape'] = None
         return infer_meta
diff --git a/paddle/phi/kernels/gpu/cross_entropy_kernel.cu b/paddle/phi/kernels/gpu/cross_entropy_kernel.cu
index fee669f40317fe..1f1c43d80b7275 100644
--- a/paddle/phi/kernels/gpu/cross_entropy_kernel.cu
+++ b/paddle/phi/kernels/gpu/cross_entropy_kernel.cu
@@ -1289,7 +1289,7 @@ void CrossEntropyWithSoftmaxCUDAKernel(const GPUContext& dev_ctx,
     return;
   }
 
-  // if axis is not the last, we need a new impliment
+  // if axis is not the last, we need a new implementation
   if (soft_label) {
     auto* logits_data = softmax->data<T>();
    auto* labels_data = labels.data<T>();
diff --git a/paddle/utils/optional.h b/paddle/utils/optional.h
index 3323495c9c5b33..81f43a2088fe87 100644
--- a/paddle/utils/optional.h
+++ b/paddle/utils/optional.h
@@ -25,7 +25,7 @@
 #include 
 #include 
 
-#include "none.h"
+#include "paddle/utils/none.h"
 
 namespace paddle {
 
@@ -104,7 +104,7 @@ class reference_content {
  public:  // structors
   ~reference_content() {}
 
-  reference_content(RefT r) : content_(r) {}
+  reference_content(RefT r) : content_(r) {}  // NOLINT
 
   reference_content(const reference_content& operand)
       : content_(operand.content_) {}
@@ -207,11 +207,13 @@ class optional_base : public optional_tag {
   // Creates an optional uninitialized.
   // No-throw
-  optional_base(none_t) : m_initialized(false) {}
+  optional_base(none_t) : m_initialized(false) {}  // NOLINT
 
   // Creates an optional initialized with 'val'.
   // Can throw if T::T(T const&) does
-  optional_base(argument_type val) : m_initialized(false) { construct(val); }
+  optional_base(argument_type val) : m_initialized(false) {  // NOLINT
+    construct(val);
+  }
 
   // Creates an optional initialized with 'val' IFF cond is true, otherwise
   // creates an uninitialzed optional.
@@ -342,7 +344,7 @@ class optional_base : public optional_tag {
     construct(factory, tag);
   }
 
-  // Constructs using any expression implicitely convertible to the single
+  // Constructs using any expression implicitly convertible to the single
   // argument
   // of a one-argument T constructor.
   // Converting constructions of optional<T> from optional<U> uses this function
@@ -355,7 +357,7 @@
     m_initialized = true;
   }
 
-  // Assigns using a form any expression implicitely convertible to the single
+  // Assigns using a form any expression implicitly convertible to the single
   // argument
   // of a T's assignment operator.
   // Converting assignments of optional<T> from optional<U> uses this function
@@ -473,11 +475,11 @@ class optional : public optional_detail::optional_base<T> {
   // Creates an optional uninitialized.
   // No-throw
-  optional(none_t none_) : base(none_) {}
+  optional(none_t none_) : base(none_) {}  // NOLINT
 
   // Creates an optional initialized with 'val'.
   // Can throw if T::T(T const&) does
-  optional(argument_type val) : base(val) {}
+  optional(argument_type val) : base(val) {}  // NOLINT
 
   // Creates an optional initialized with 'val' IFF cond is true, otherwise
   // creates an uninitialized optional.
@@ -495,7 +497,7 @@ class optional : public optional_detail::optional_base<T> {
   // Creates an optional with an expression which can be either
   // (a) An instance of InPlaceFactory (i.e. in_place(a,b,...,n);
   // (b) An instance of TypedInPlaceFactory ( i.e. in_place<T>(a,b,...,n);
-  // (c) Any expression implicitely convertible to the single type
+  // (c) Any expression implicitly convertible to the single type
   // of a one-argument T's constructor.
   // (d*) Weak compilers (BCB) might also resolved Expr as optional<T> and
   // optional<U>
@@ -623,7 +625,7 @@ inline typename optional<T>::reference_const_type get(optional<T> const& opt) {
 }
 
 template <class T>
-inline typename optional<T>::reference_type get(optional<T>& opt) {
+inline typename optional<T>::reference_type get(optional<T>& opt) {  // NOLINT
   return opt.get();
 }
@@ -651,7 +653,7 @@ inline typename optional<T>::reference_const_type get_optional_value_or(
 
 template <class T>
 inline typename optional<T>::reference_type get_optional_value_or(
-    optional<T>& opt, typename optional<T>::reference_type v) {
+    optional<T>& opt, typename optional<T>::reference_type v) {  // NOLINT
   return opt.get_value_or(v);
 }
@@ -665,7 +667,8 @@ inline typename optional<T>::pointer_const_type get_pointer(
 }
 
 template <class T>
-inline typename optional<T>::pointer_type get_pointer(optional<T>& opt) {
+inline typename optional<T>::pointer_type get_pointer(
+    optional<T>& opt) {  // NOLINT
   return opt.get_ptr();
 }
diff --git a/python/paddle/distributed/passes/pass_utils.py b/python/paddle/distributed/passes/pass_utils.py
index bed3a436cd1bd3..2c5acec6bd085a 100644
--- a/python/paddle/distributed/passes/pass_utils.py
+++ b/python/paddle/distributed/passes/pass_utils.py
@@ -1119,7 +1119,7 @@ def add_persistable_var(op_idx, program_type):
                 f"Cannot infer chunk_id for op {op.name()} at index {idx}"
             )
 
-        # Step2.2: indentify the job_type of the op
+        # Step2.2: identify the job_type of the op
         if op_role == int(OpRole.Optimize):
             job_type = "optimizer"
         elif op_role == int(OpRole.Backward) and split_bw:
diff --git a/python/paddle/profiler/profiler.py b/python/paddle/profiler/profiler.py
index 615742ec28bc45..8c25fb2bad918a 100644
--- a/python/paddle/profiler/profiler.py
+++ b/python/paddle/profiler/profiler.py
@@ -675,7 +675,7 @@ def stop(self) -> None:
             self.record_event = None
         if self.current_state == ProfilerState.READY:
             warn(
-                "Inproper Profiler state transform: READY->CLOSED, profiler will start and stop without saving data"
+                "Improper Profiler state transform: READY->CLOSED, profiler will start and stop without saving data"
             )
             self.profiler.start()
             self.profiler.stop()
diff --git a/test/deprecated/legacy_test/auto_parallel_op_test.py b/test/deprecated/legacy_test/auto_parallel_op_test.py
index bf0250c31d6a6e..ba5dece7168f2d 100644
--- a/test/deprecated/legacy_test/auto_parallel_op_test.py
+++ b/test/deprecated/legacy_test/auto_parallel_op_test.py
@@ -352,7 +352,7 @@ def convert_input_dims_map_to_placements(
     return placements_map
 
 
-# TODO: This method has been implementd in
+# TODO: This method has been implemented in
 # paddle/phi/core/distributed/auto_parallel/placement_types.h, bind it
 # python and it's logic.
 def placements_to_dims_map(placements: list, tensor_ndim: int) -> tuple[int]:
diff --git a/test/ir/pir/cinn/test_anchor_fusion.py b/test/ir/pir/cinn/test_anchor_fusion.py
index 80a8300d4ce596..f995cf520c349a 100644
--- a/test/ir/pir/cinn/test_anchor_fusion.py
+++ b/test/ir/pir/cinn/test_anchor_fusion.py
@@ -66,7 +66,7 @@ def check_accuracy_and_kernel_num(
         if kernel_num is not None:
             utils.check_jit_kernel_number(static_compute, kernel_num)
 
-    def test_identiy_iters_fusion(self):
+    def test_identity_iters_fusion(self):
         #    T
         #  / | \
         # /  |  \
diff --git a/test/legacy_test/auto_parallel_op_test.py b/test/legacy_test/auto_parallel_op_test.py
index c5c593bd8d4d0a..fc790b6c4b98e4 100644
--- a/test/legacy_test/auto_parallel_op_test.py
+++ b/test/legacy_test/auto_parallel_op_test.py
@@ -351,7 +351,7 @@ def convert_input_dims_map_to_placements(
     return placements_map
 
 
-# TODO: This method has been implementd in
+# TODO: This method has been implemented in
 # paddle/phi/core/distributed/auto_parallel/placement_types.h, bind it
 # python and it's logic.
 def placements_to_dims_map(placements: list, tensor_ndim: int) -> tuple[int]:
diff --git a/test/legacy_test/test_dist_fleet_base.py b/test/legacy_test/test_dist_fleet_base.py
index 2d0637de5eb735..5f390448d69f35 100644
--- a/test/legacy_test/test_dist_fleet_base.py
+++ b/test/legacy_test/test_dist_fleet_base.py
@@ -47,7 +47,7 @@ class FleetDistRunnerBase:
     """
     run_pserver,run_trainer : after init role, using transpiler split program
-    net : implment by child class, the network of model
+    net : implemented by child class, the network of model
     do training : exe run program
     """
diff --git a/test/legacy_test/test_dist_fleet_heter_base.py b/test/legacy_test/test_dist_fleet_heter_base.py
index e55308e4619e1f..ed503c34190707 100644
--- a/test/legacy_test/test_dist_fleet_heter_base.py
+++ b/test/legacy_test/test_dist_fleet_heter_base.py
@@ -40,7 +40,7 @@ class FleetDistHeterRunnerBase:
     """
     run_pserver,run_trainer : after init role, using transpiler split program
-    net : implment by child class, the network of model
+    net : implemented by child class, the network of model
     do training : exe run program
     """
diff --git a/test/legacy_test/test_elementwise_div_op.py b/test/legacy_test/test_elementwise_div_op.py
index 5598f5125f8a29..6d0aad1230cdcb 100644
--- a/test/legacy_test/test_elementwise_div_op.py
+++ b/test/legacy_test/test_elementwise_div_op.py
@@ -195,7 +195,7 @@ def compute_gradient_y(self, grad_out, out, y):
 )
 class TestElementwiseDivOpBF16(ElementwiseDivOp):
     def init_args(self):
-        # In due to output data type inconsistence of bfloat16 paddle op, we disable the dygraph check.
+        # Due to the output data type inconsistency of the bfloat16 paddle op, we disable the dygraph check.
         self.check_dygraph = False
         self.place = core.CUDAPlace(0)
diff --git a/test/legacy_test/test_quantile_and_nanquantile.py b/test/legacy_test/test_quantile_and_nanquantile.py
index 0fe29a0eb09729..6185516065c965 100644
--- a/test/legacy_test/test_quantile_and_nanquantile.py
+++ b/test/legacy_test/test_quantile_and_nanquantile.py
@@ -28,7 +28,7 @@ class TestQuantileAndNanquantile(unittest.TestCase):
     """
     This class is used for numerical precision testing. If there is a
     corresponding numpy API, the precision comparison can be performed directly.
-    Otherwise, it needs to be verified by numpy implementated function.
+    Otherwise, it needs to be verified by a numpy-implemented function.
""" def setUp(self): diff --git a/tools/CrossStackProfiler/CspFileReader.py b/tools/CrossStackProfiler/CspFileReader.py index 75cf430e3dbae5..047f19377e4df8 100755 --- a/tools/CrossStackProfiler/CspFileReader.py +++ b/tools/CrossStackProfiler/CspFileReader.py @@ -144,18 +144,18 @@ def _checkArgs(self): self._checkArgsKey("minTimeStamp", int) def getFileListByGroup(self, groupId): - lIndext = 0 - rIndext = 0 + lIndex = 0 + rIndex = 0 if self._organizeForm == FILEORGANIZEFORM_BYTRAINER: - lIndext = groupId * self._groupSize - rIndext = (groupId + 1) * self._groupSize + lIndex = groupId * self._groupSize + rIndex = (groupId + 1) * self._groupSize elif self._organizeForm == FILEORGANIZEFORM_BYRANK: - lIndext = groupId * self._groupSize * self._gpuPerTrainer - rIndext = (groupId + 1) * self._groupSize * self._gpuPerTrainer + lIndex = groupId * self._groupSize * self._gpuPerTrainer + rIndex = (groupId + 1) * self._groupSize * self._gpuPerTrainer try: - return self._fileList[lIndext:rIndext] + return self._fileList[lIndex:rIndex] except IndexError: raise IndexError("invalid index of file list")