
[CodeStyle][Typos][I-[1-10]] Fix typo(identiy,indentify,implemention,implementd,implementated,impliment,implment,implicitely,Inproper,inconsistence,uncorrectly,Indext) #70558

Merged
8 commits merged on Jan 6, 2025
12 changes: 0 additions & 12 deletions _typos.toml
@@ -80,18 +80,6 @@ framwork = 'framwork'
 frequence = 'frequence'
 fron = 'fron'
 fullfill = 'fullfill'
-identiy = 'identiy'
-indentify = 'indentify'
-implemention = 'implemention'
-implementated = 'implementated'
-implementd = 'implementd'
-impliment = 'impliment'
-implment = 'implment'
-implicitely = 'implicitely'
-Inproper = 'Inproper'
-inconsistence = 'inconsistence'
-uncorrectly = 'uncorrectly'
-Indext = 'Indext'
 Indexs = 'Indexs'
 indexs = 'indexs'
 indiates = 'indiates'
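
Each entry deleted above follows the word = 'word' convention this config uses to make the typos checker accept a misspelling as-is, so removing an entry re-enables detection of that word across the repo. A minimal verification sketch (assuming Python 3.11+ for tomllib and that it is run from the repository root):

import tomllib

REMOVED = {"identiy", "indentify", "implemention", "implementated",
           "implementd", "impliment", "implment", "implicitely",
           "Inproper", "inconsistence", "uncorrectly", "Indext"}

def keys_in(node):
    # walk every table of the parsed TOML and yield its keys
    if isinstance(node, dict):
        for key, value in node.items():
            yield key
            yield from keys_in(value)

with open("_typos.toml", "rb") as f:
    still_listed = REMOVED.intersection(keys_in(tomllib.load(f)))

assert not still_listed, f"still whitelisted: {still_listed}"
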
2 changes: 1 addition & 1 deletion paddle/fluid/framework/fleet/heter_ps/heter_comm_kernel.cu
@@ -335,7 +335,7 @@ __global__ void scatter_dvals_by_unit_kernel(TUnit* d_dest_vals,
   }
 }
 
-// cuda implemention of heter_comm_kernel.h
+// cuda implementation of heter_comm_kernel.h
 template <typename T, typename StreamType>
 void HeterCommKernel::fill_idx(T* idx,
                                int64_t len,
4 changes: 2 additions & 2 deletions paddle/phi/api/generator/dist_api_gen.py
@@ -458,7 +458,7 @@
 """
 
 # TODO(GhostScreaming): Some operators generate shape info in runtime,
-# bincount. As a result, dist_output's global shape is set uncorrectly,
+# bincount. As a result, dist_output's global shape is set incorrectly,
 # because it's generated in InferMeta function. A temporally solution is
 # use black op list to set DistTensor shape extra.
 SINGLE_SET_DIST_OUT_DIMS = """
@@ -615,7 +615,7 @@ def parse_infer_meta(self, infer_meta_config):
         infer_meta['local_shape'] = None
         # Inplace op that changes shape should not change its global shape
         # in inferMeta, otherwise, it may fails in reshard pass because of
-        # the inconsistence of dist_atttr and shape.
+        # the inconsistency of dist_atttr and shape.
         if 'global_shape' not in infer_meta_config:
             infer_meta['global_shape'] = None
         return infer_meta
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/cross_entropy_kernel.cu
@@ -1289,7 +1289,7 @@ void CrossEntropyWithSoftmaxCUDAKernel(const GPUContext& dev_ctx,
     return;
   }
 
-  // if axis is not the last, we need a new impliment
+  // if axis is not the last, we need a new implement
   if (soft_label) {
     auto* logits_data = softmax->data<T>();
    auto* labels_data = labels.data<T>();
27 changes: 15 additions & 12 deletions paddle/utils/optional.h
@@ -25,7 +25,7 @@
 #include <new>
 #include <type_traits>
 
-#include "none.h"
+#include "paddle/utils/none.h"
 
 namespace paddle {
 
@@ -104,7 +104,7 @@ class reference_content {
  public:  // structors
   ~reference_content() {}
 
-  reference_content(RefT r) : content_(r) {}
+  reference_content(RefT r) : content_(r) {}  // NOLINT
 
   reference_content(const reference_content& operand)
       : content_(operand.content_) {}
@@ -207,11 +207,13 @@ class optional_base : public optional_tag {
 
   // Creates an optional<T> uninitialized.
   // No-throw
-  optional_base(none_t) : m_initialized(false) {}
+  optional_base(none_t) : m_initialized(false) {}  // NOLINT
 
   // Creates an optional<T> initialized with 'val'.
   // Can throw if T::T(T const&) does
-  optional_base(argument_type val) : m_initialized(false) { construct(val); }
+  optional_base(argument_type val) : m_initialized(false) {  // NOLINT
+    construct(val);
+  }
 
   // Creates an optional<T> initialized with 'val' IFF cond is true, otherwise
   // creates an uninitialzed optional<T>.
@@ -342,7 +344,7 @@ class optional_base : public optional_tag {
     construct(factory, tag);
   }
 
-  // Constructs using any expression implicitely convertible to the single
+  // Constructs using any expression implicitly convertible to the single
   // argument
   // of a one-argument T constructor.
   // Converting constructions of optional<T> from optional<U> uses this function
@@ -355,7 +357,7 @@
     m_initialized = true;
   }
 
-  // Assigns using a form any expression implicitely convertible to the single
+  // Assigns using a form any expression implicitly convertible to the single
   // argument
   // of a T's assignment operator.
   // Converting assignments of optional<T> from optional<U> uses this function
@@ -473,11 +475,11 @@ class optional : public optional_detail::optional_base<T> {
 
   // Creates an optional<T> uninitialized.
   // No-throw
-  optional(none_t none_) : base(none_) {}
+  optional(none_t none_) : base(none_) {}  // NOLINT
 
   // Creates an optional<T> initialized with 'val'.
   // Can throw if T::T(T const&) does
-  optional(argument_type val) : base(val) {}
+  optional(argument_type val) : base(val) {}  // NOLINT
 
   // Creates an optional<T> initialized with 'val' IFF cond is true, otherwise
   // creates an uninitialized optional.
@@ -495,7 +497,7 @@ class optional : public optional_detail::optional_base<T> {
   // Creates an optional<T> with an expression which can be either
   // (a) An instance of InPlaceFactory (i.e. in_place(a,b,...,n);
   // (b) An instance of TypedInPlaceFactory ( i.e. in_place<T>(a,b,...,n);
-  // (c) Any expression implicitely convertible to the single type
+  // (c) Any expression implicitly convertible to the single type
   // of a one-argument T's constructor.
   // (d*) Weak compilers (BCB) might also resolved Expr as optional<T> and
   // optional<U>
@@ -623,7 +625,7 @@ inline typename optional<T>::reference_const_type get(optional<T> const& opt) {
 }
 
 template <class T>
-inline typename optional<T>::reference_type get(optional<T>& opt) {
+inline typename optional<T>::reference_type get(optional<T>& opt) {  // NOLINT
   return opt.get();
 }
 
@@ -651,7 +653,7 @@ inline typename optional<T>::reference_const_type get_optional_value_or(
 
 template <class T>
 inline typename optional<T>::reference_type get_optional_value_or(
-    optional<T>& opt, typename optional<T>::reference_type v) {
+    optional<T>& opt, typename optional<T>::reference_type v) {  // NOLINT
   return opt.get_value_or(v);
 }
 
@@ -665,7 +667,8 @@ inline typename optional<T>::pointer_const_type get_pointer(
 }
 
 template <class T>
-inline typename optional<T>::pointer_type get_pointer(optional<T>& opt) {
+inline typename optional<T>::pointer_type get_pointer(
+    optional<T>& opt) {  // NOLINT
   return opt.get_ptr();
 }
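
A note on the markers added above: // NOLINT tells the linters to skip that single line. In this header the suppressions most likely target the checks that flag implicit single-argument constructors and non-const reference parameters, both of which this boost-style optional uses deliberately.
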
2 changes: 1 addition & 1 deletion python/paddle/distributed/passes/pass_utils.py
@@ -1119,7 +1119,7 @@ def add_persistable_var(op_idx, program_type):
                 f"Cannot infer chunk_id for op {op.name()} at index {idx}"
             )
 
         # Step2.2: indentify the job_type of the op
-        # Step2.2: indentify the job_type of the op
+        # Step2.2: identify the job_type of the op
         if op_role == int(OpRole.Optimize):
             job_type = "optimizer"
         elif op_role == int(OpRole.Backward) and split_bw:
2 changes: 1 addition & 1 deletion python/paddle/profiler/profiler.py
@@ -675,7 +675,7 @@ def stop(self) -> None:
         self.record_event = None
         if self.current_state == ProfilerState.READY:
             warn(
-                "Inproper Profiler state transform: READY->CLOSED, profiler will start and stop without saving data"
+                "Improper Profiler state transform: READY->CLOSED, profiler will start and stop without saving data"
             )
             self.profiler.start()
             self.profiler.stop()
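
For context, this warning fires when stop() is reached while the profiler is still in the READY state, meaning start() was never called. A minimal usage sketch (not part of this PR, written against the public paddle.profiler API) that walks the intended READY -> RECORD -> CLOSED sequence so the collected data is actually saved:

import paddle
import paddle.profiler as profiler

prof = profiler.Profiler(targets=[profiler.ProfilerTarget.CPU])
prof.start()                     # READY -> RECORD
for _ in range(3):
    x = paddle.randn([8, 8])
    _ = paddle.matmul(x, x)
    prof.step()                  # mark a step boundary for the scheduler
prof.stop()                      # RECORD -> CLOSED, data is saved
prof.summary()                   # print the collected statistics
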
2 changes: 1 addition & 1 deletion test/deprecated/legacy_test/auto_parallel_op_test.py
@@ -352,7 +352,7 @@ def convert_input_dims_map_to_placements(
     return placements_map
 
 
-# TODO: This method has been implementd in
+# TODO: This method has been implemented in
 # paddle/phi/core/distributed/auto_parallel/placement_types.h, bind it
 # python and it's logic.
 def placements_to_dims_map(placements: list, tensor_ndim: int) -> tuple[int]:
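
For readers unfamiliar with the helper named in the TODO: a dims_map records, for each tensor dimension, the mesh dimension that shards it, or -1 if it is not sharded. A rough sketch of that conversion, assuming each placement object exposes is_shard() and get_dim() (an assumption about the placement API, not something taken from this diff):

def placements_to_dims_map_sketch(placements, tensor_ndim):
    # dims_map[i] == j means tensor dimension i is sharded along mesh
    # dimension j; -1 means that tensor dimension is not sharded.
    dims_map = [-1] * tensor_ndim
    for mesh_dim, placement in enumerate(placements):
        if placement.is_shard():
            dims_map[placement.get_dim()] = mesh_dim
    return tuple(dims_map)
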
2 changes: 1 addition & 1 deletion test/ir/pir/cinn/test_anchor_fusion.py
@@ -66,7 +66,7 @@ def check_accuracy_and_kernel_num(
         if kernel_num is not None:
             utils.check_jit_kernel_number(static_compute, kernel_num)
 
-    def test_identiy_iters_fusion(self):
+    def test_identity_iters_fusion(self):
         # T
         # / | \
         # / | \
2 changes: 1 addition & 1 deletion test/legacy_test/auto_parallel_op_test.py
@@ -351,7 +351,7 @@ def convert_input_dims_map_to_placements(
     return placements_map
 
 
-# TODO: This method has been implementd in
+# TODO: This method has been implemented in
 # paddle/phi/core/distributed/auto_parallel/placement_types.h, bind it
 # python and it's logic.
 def placements_to_dims_map(placements: list, tensor_ndim: int) -> tuple[int]:
2 changes: 1 addition & 1 deletion test/legacy_test/test_dist_fleet_base.py
@@ -47,7 +47,7 @@
 class FleetDistRunnerBase:
     """
     run_pserver,run_trainer : after init role, using transpiler split program
-    net : implment by child class, the network of model
+    net : implement by child class, the network of model
     do training : exe run program
     """
 
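
The docstring describes a template-method contract: the base class drives the pserver and trainer processes, and each concrete test supplies the model. A minimal sketch of a subclass (the class name and the argument list of net() are guesses for illustration; the real signature is defined in this file):

class DemoModelRunner(FleetDistRunnerBase):
    def net(self, args, batch_size=4, lr=0.01):
        # build inputs, the forward graph and the loss here; run_pserver /
        # run_trainer in the base class then execute the resulting program.
        ...
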
2 changes: 1 addition & 1 deletion test/legacy_test/test_dist_fleet_heter_base.py
@@ -40,7 +40,7 @@
 class FleetDistHeterRunnerBase:
     """
     run_pserver,run_trainer : after init role, using transpiler split program
-    net : implment by child class, the network of model
+    net : implement by child class, the network of model
     do training : exe run program
     """
 
2 changes: 1 addition & 1 deletion test/legacy_test/test_elementwise_div_op.py
@@ -195,7 +195,7 @@ def compute_gradient_y(self, grad_out, out, y):
 )
 class TestElementwiseDivOpBF16(ElementwiseDivOp):
     def init_args(self):
-        # In due to output data type inconsistence of bfloat16 paddle op, we disable the dygraph check.
+        # In due to output data type inconsistency of bfloat16 paddle op, we disable the dygraph check.
         self.check_dygraph = False
         self.place = core.CUDAPlace(0)
 
2 changes: 1 addition & 1 deletion test/legacy_test/test_quantile_and_nanquantile.py
@@ -28,7 +28,7 @@ class TestQuantileAndNanquantile(unittest.TestCase):
     """
     This class is used for numerical precision testing. If there is
     a corresponding numpy API, the precision comparison can be performed directly.
-    Otherwise, it needs to be verified by numpy implementated function.
+    Otherwise, it needs to be verified by numpy implemented function.
     """
 
     def setUp(self):
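
As an illustration of the "compare against the numpy API" path the docstring mentions (a sketch, not code from the test; shapes and tolerances are arbitrary):

import numpy as np
import paddle

x_np = np.random.rand(4, 5).astype("float64")
# paddle.quantile and np.quantile should agree on identical input
out_paddle = paddle.quantile(paddle.to_tensor(x_np), q=0.3, axis=1).numpy()
out_numpy = np.quantile(x_np, 0.3, axis=1)
np.testing.assert_allclose(out_paddle, out_numpy, rtol=1e-6)
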
14 changes: 7 additions & 7 deletions tools/CrossStackProfiler/CspFileReader.py
@@ -144,18 +144,18 @@ def _checkArgs(self):
         self._checkArgsKey("minTimeStamp", int)
 
     def getFileListByGroup(self, groupId):
-        lIndext = 0
-        rIndext = 0
+        lIndex = 0
+        rIndex = 0
 
         if self._organizeForm == FILEORGANIZEFORM_BYTRAINER:
-            lIndext = groupId * self._groupSize
-            rIndext = (groupId + 1) * self._groupSize
+            lIndex = groupId * self._groupSize
+            rIndex = (groupId + 1) * self._groupSize
         elif self._organizeForm == FILEORGANIZEFORM_BYRANK:
-            lIndext = groupId * self._groupSize * self._gpuPerTrainer
-            rIndext = (groupId + 1) * self._groupSize * self._gpuPerTrainer
+            lIndex = groupId * self._groupSize * self._gpuPerTrainer
+            rIndex = (groupId + 1) * self._groupSize * self._gpuPerTrainer
 
         try:
-            return self._fileList[lIndext:rIndext]
+            return self._fileList[lIndex:rIndex]
         except IndexError:
             raise IndexError("invalid index of file list")
 
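
A small worked example of the slicing above (the numbers are illustrative, not the profiler's real configuration): with a group size of 2 and 4 GPUs per trainer, group 1 organized by rank covers files 8 through 15.

group_size, gpu_per_trainer, group_id = 2, 4, 1            # illustrative values
l_index = group_id * group_size * gpu_per_trainer          # 1 * 2 * 4 = 8
r_index = (group_id + 1) * group_size * gpu_per_trainer    # 2 * 2 * 4 = 16
file_list = [f"profile_rank_{i}.log" for i in range(20)]   # hypothetical names
print(file_list[l_index:r_index])                          # the 8 files for group 1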