[CodeStyle][Typos][D-[8-12]] Fix typos(defalut,defualt,defaut,`Defaule`,`defered`,`Defered`,`defind`,`defeine`,`defition`,`defination`) (#70505)

* fix

* del

* fix

* Update paddle/fluid/pybind/pybind.cc

---------

Co-authored-by: Nyakku Shigure <[email protected]>
enkilee and SigureMo authored Dec 30, 2024
1 parent 9520747 commit 12b4e5f
Showing 18 changed files with 34 additions and 44 deletions.
10 changes: 0 additions & 10 deletions _typos.toml
@@ -49,16 +49,6 @@ deciamls = 'deciamls'
decalared = 'decalared'
decompse = 'decompse'
decompositing = 'decompositing'
-defalut = 'defalut'
-Defaule = 'Defaule'
-defaut = 'defaut'
-defualt = 'defualt'
-defered = 'defered'
-Defered = 'Defered'
-defind = 'defind'
-defeine = 'defeine'
-defition = 'defition'
-defination = 'defination'
defferent = 'defferent'
differenciation = 'differenciation'
differnt = 'differnt'
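The ten entries removed here come out of the typo allowlist, so the checker flags these misspellings again wherever they still occur (assuming the usual `typos` extend-words setup, where `word = 'word'` suppresses a known typo until it is fixed). A quick leftover-scan sketch in Python, for illustration only:

import pathlib
import re

MISSPELLINGS = ["defalut", "Defaule", "defaut", "defualt", "defered",
                "Defered", "defind", "defeine", "defition", "defination"]
pattern = re.compile("|".join(MISSPELLINGS))

# Walk the tree and report any line still containing one of the misspellings.
for path in pathlib.Path(".").rglob("*"):
    if not path.is_file() or path.suffix not in {".py", ".cc", ".h", ".txt"}:
        continue
    for lineno, line in enumerate(path.read_text(errors="ignore").splitlines(), 1):
        if pattern.search(line):
            print(f"{path}:{lineno}: {line.strip()}")
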
2 changes: 1 addition & 1 deletion paddle/CMakeLists.txt
@@ -20,7 +20,7 @@ add_subdirectory(fluid)
# Now, we want to make all cc tests dynamically linked to the main paddle library,
# i.e., `libpaddle.so`, so we changes the logic of (2), (3), (4):
# (2) calling `cc_test()` in each `CMakeLists.txt` will not `exactly` add test, but
-# record all tests and its source files, the action of add tests is defered to HERE.
+# record all tests and its source files, the action of add tests is deferred to HERE.
# Why doing so? since the target of `libpaddle.so` is mostly the last target, and
# the tests should be added after that according to dependency.
# (3) the tests links dynamic libraries, `libpaddle.so`
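The fixed comment describes a record-then-materialize scheme: `cc_test()` only records each test, and the actual `add_test` happens after `libpaddle.so` exists. A toy Python sketch of that idea (illustrative only; the real logic is CMake):

_pending_tests = []  # (name, sources) pairs recorded by cc_test()

def cc_test(name, srcs):
    # Record only; actually adding the test is deferred until the main
    # library target exists.
    _pending_tests.append((name, srcs))

def finalize_tests():
    # Deferred to HERE: every recorded test is added after libpaddle.so,
    # so it can link against it dynamically.
    for name, srcs in _pending_tests:
        print(f"add_test {name} ({', '.join(srcs)}) -> links libpaddle.so")

cc_test("tensor_test", ["tensor_test.cc"])
finalize_tests()
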
6 changes: 3 additions & 3 deletions paddle/fluid/framework/ir/op_compat_sensible_pass.h
@@ -123,9 +123,9 @@ class InputOrOutputCompat {
* .AddInput("Bias").IsTensor().IsOptional().End()
* .AddOutput("Out").IsTensor().End()
*
-* All the inference-aware Op defition is as above, all the other attributes not
-* contained in the definition should be set default value or it would be judged
-* incompatible.
+* All the inference-aware Op definition is as above, all the other attributes
+* not contained in the definition should be set default value or it would be
+* judged incompatible.
*/
class OpCompat {
public:
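The surrounding comment demonstrates the chained `AddInput(...).IsTensor().IsOptional().End()` style. A toy Python fluent-builder sketch mirroring that shape (method names copied from the comment; the body is invented for illustration):

class OpCompat:
    def __init__(self, op_name):
        self.op_name = op_name
        self.inputs, self.outputs = {}, {}
        self._current = None

    def AddInput(self, name):
        self._current = self.inputs.setdefault(name, {"optional": False})
        return self

    def AddOutput(self, name):
        self._current = self.outputs.setdefault(name, {"optional": False})
        return self

    def IsTensor(self):
        self._current["tensor"] = True
        return self

    def IsOptional(self):
        self._current["optional"] = True
        return self

    def End(self):
        self._current = None
        return self

compat = (OpCompat("fc")
          .AddInput("Bias").IsTensor().IsOptional().End()
          .AddOutput("Out").IsTensor().End())
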
2 changes: 1 addition & 1 deletion paddle/fluid/pir/dialect/op_generator/op_gen.py
@@ -1283,7 +1283,7 @@ def AutoCodeGen(
ops_name_list = [] # all op class name store in this list
ops_declare_list = [] # all op class declare store in this list
ops_defined_list = [] # all op class defined store in this list
-ops_vjp_defined_list = [] # all op vjp static interface defination
+ops_vjp_defined_list = [] # all op vjp static interface definition

# (4) parse name of ops which have custom vjp rules
custom_vjp_op_name_list = []
6 changes: 3 additions & 3 deletions paddle/fluid/pybind/pybind.cc
@@ -1856,14 +1856,14 @@ All parameter, weight, gradient are variables in Paddle.
return operators::ExtraInfoUtils::Instance().GetExtraAttrsMap(op_type);
});
m.def(
"get_attrtibute_type",
"get_attribute_type",
[](const std::string &op_type,
const std::string &attr_name) -> paddle::framework::proto::AttrType {
-const auto &defalut_val =
+const auto &default_val =
operators::ExtraInfoUtils::Instance().GetExtraAttrsMap(op_type).at(
attr_name);
return static_cast<paddle::framework::proto::AttrType>(
-defalut_val.index() - 1);
+default_val.index() - 1);
});
m.def("_add_skip_comp_ops", &paddle::prim::PrimCommonUtils::AddSkipCompOps);
m.def("_set_bwd_prim_blacklist",
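The renamed binding is consumed from Python in test/legacy_test/op.py (updated later in this diff). A hedged usage sketch — the module path and the op/attribute names are assumptions, and the underlying C++ raises if the op has no such extra attribute:

from paddle.base import core  # module path assumed; older releases used paddle.fluid

# The C++ above derives the proto AttrType enum from the variant index of the
# attribute's default value, hence `default_val.index() - 1`.
attr_type = int(core.get_attribute_type("conv2d", "use_mkldnn"))  # names illustrative
print(attr_type)
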
8 changes: 4 additions & 4 deletions paddle/phi/api/generator/wrapped_infermeta_gen.py
@@ -79,7 +79,7 @@ def gene_wrapped_infermeta_and_register(api):
void {wrapped_infermeta_name}({", ".join(args)});
"""

defind_code = f"""
defined_code = f"""
void {wrapped_infermeta_name}({", ".join(args)}) {{
{api.infer_meta['func']}({", ".join(invoke_param)});
}}
@@ -89,7 +89,7 @@ def gene_wrapped_infermeta_and_register(api):
PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{get_wrapped_infermeta_name(api.kernel['func'][0])});"""

kernel_func_set.add(api.kernel['func'][0])
-return declare_code, defind_code, register_code
+return declare_code, defined_code, register_code
else:
return '', '', register_code
else:
@@ -156,11 +156,11 @@ def generate_wrapped_infermeta_and_register(
api_item = ForwardAPI(api)
(
declare_code,
-defind_code,
+defined_code,
register_code,
) = gene_wrapped_infermeta_and_register(api_item)
header_file.write(declare_code)
-source_file.write(defind_code)
+source_file.write(defined_code)
if infermeta_register_code.find(register_code) == -1:
infermeta_register_code = infermeta_register_code + register_code

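The generator emits three strings per API — a declaration, a definition (the `defind_code` → `defined_code` rename above), and registration code. A stripped-down sketch of that pattern, with invented helper names and macro arguments:

def gen_wrapped_infermeta(name, args, invoke_params):
    declare_code = f"void {name}({', '.join(args)});"
    defined_code = (
        f"void {name}({', '.join(args)}) {{\n"
        f"  SomeInferMeta({', '.join(invoke_params)});\n"
        f"}}"
    )
    register_code = f"PD_REGISTER_INFER_META_FN(some_kernel, phi::{name});"
    return declare_code, defined_code, register_code

declare, defined, register = gen_wrapped_infermeta(
    "AddInferMeta", ["const MetaTensor& x", "MetaTensor* out"], ["x", "out"]
)
print(defined)
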
12 changes: 6 additions & 6 deletions paddle/phi/api/profiler/host_event_recorder.h
@@ -73,27 +73,27 @@ class EventContainer {

private:
struct EventBlock {
-union InitDeferedEvent {
-InitDeferedEvent() {}
-~InitDeferedEvent() {}
+union InitDeferredEvent {
+InitDeferredEvent() {}
+~InitDeferredEvent() {}

EventType event;
};

static constexpr size_t kBlockSize = 1 << 24; // 16 MB
static constexpr size_t kAvailSize =
kBlockSize - sizeof(size_t) - sizeof(nullptr);
-static constexpr size_t kNumEvents = kAvailSize / sizeof(InitDeferedEvent);
+static constexpr size_t kNumEvents = kAvailSize / sizeof(InitDeferredEvent);
static constexpr size_t kPadSize =
-kAvailSize - kNumEvents * sizeof(InitDeferedEvent);
+kAvailSize - kNumEvents * sizeof(InitDeferredEvent);
static constexpr size_t kMinimumEventsPerBlock = 1024;
static_assert(
kNumEvents >= kMinimumEventsPerBlock,
"EventType is too large for kBlockSize, make kBlockSize larger");

size_t offset = 0;
EventBlock *next = nullptr;
-InitDeferedEvent events[kNumEvents];
+InitDeferredEvent events[kNumEvents];
char padding[kPadSize];
};
static_assert(sizeof(EventBlock) == EventBlock::kBlockSize,
2 changes: 1 addition & 1 deletion paddle/phi/backends/dynload/mklrt.h
@@ -47,7 +47,7 @@ extern void* mklrt_dso_handle;
extern DynLoad__##__name __name

// mkl_dfti.h has a macro that shadows the function with the same name
-// un-defeine this macro so as to export that function
+// un-define this macro so as to export that function
#undef DftiCreateDescriptor

#define MKLDFTI_ROUTINE_EACH(__macro) \
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/api.py
@@ -3302,7 +3302,7 @@ def __init__(
worker_init_fn=dataloader.worker_init_fn,
persistent_workers=dataloader._persistent_workers,
)
-# Note(lizhiyu): In dygraph mode, the flag "pin_memory" is defualt "True", but it decrease the speed of `AutoParallel`
+# Note(lizhiyu): In dygraph mode, the flag "pin_memory" is default "True", but it decrease the speed of `AutoParallel`
self._dataloader.pin_memory = False

def _process_shard_dims(self, shard_dims):
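A hedged sketch of what the note describes: the wrapper constructs the inner DataLoader and then flips `pin_memory` off, exactly as the `self._dataloader.pin_memory = False` line after the comment does.

import paddle
from paddle.io import DataLoader, TensorDataset

dataset = TensorDataset([paddle.randn([8, 4])])
loader = DataLoader(dataset, batch_size=2)

# In dygraph mode pin_memory defaults to True (per the note above); turning
# it off avoids the AutoParallel slowdown the comment mentions.
loader.pin_memory = False
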
2 changes: 1 addition & 1 deletion python/paddle/nn/quant/quantized_linear.py
@@ -57,7 +57,7 @@ def _get_arch_info():
"Paddle is not compiled with CUDA, we cannot get SMVersion from device, please try to compile Paddle with CUDA"
)
else:
-# Defaut arch value for type checking.
+# Default arch value for type checking.
return 0


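A hedged, simplified sketch of the guard around the fixed comment (only the final `return 0` branch is quoted from the diff; the CUDA branch is an assumption about the general shape): query the SM version when CUDA is usable, otherwise fall back to 0 so type checking still sees an int.

import paddle

def get_arch_info() -> int:
    if paddle.is_compiled_with_cuda():
        major, minor = paddle.device.cuda.get_device_capability()
        return major * 10 + minor  # e.g. 80 for SM 8.0
    # Default arch value for type checking.
    return 0
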
2 changes: 1 addition & 1 deletion python/paddle/tensor/random.py
@@ -366,7 +366,7 @@ def log_normal(
std (float|Tensor, optional): The standard deviation of the output Tensor's underlying normal distribution.
If ``std`` is float, all elements of the output Tensor share the same standard deviation.
If ``std`` is a Tensor(data type supports float32, float64), it has per-element standard deviations.
-Defaule is 2.0
+Default is 2.0
shape (tuple|list|Tensor|None, optional): Shape of the Tensor to be created. The data type is ``int32`` or ``int64`` .
If ``shape`` is a list or tuple, each element of it should be integer or 0-D Tensor with shape [].
If ``shape`` is an Tensor, it should be an 1-D Tensor which represents a list. If ``mean`` or ``std``
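A hedged usage sketch for the docstring fixed above (assuming the function is exported as `paddle.log_normal`; the docstring gives 2.0 as the default `std` of the underlying normal, and a mean default of 1.0 is assumed here):

import paddle

# Explicit values shown for clarity; omitting std uses the documented
# default of 2.0.
out = paddle.log_normal(mean=1.0, std=2.0, shape=[2, 3])
print(out.shape)  # [2, 3]
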
2 changes: 1 addition & 1 deletion test/dygraph_to_static/test_error.py
@@ -121,7 +121,7 @@ def setUp(self):
self.reset_flags_to_default()

def reset_flags_to_default(self):
-# Reset flags to use defaut value
+# Reset flags to use default value

# 1. A flag to set whether to open the dygraph2static error reporting module
os.environ[error.DISABLE_ERROR_ENV_NAME] = str(
2 changes: 1 addition & 1 deletion test/ir/pir/fused_pass/onednn/pass_test.py
@@ -30,7 +30,7 @@ def setUpClass(self):
self.pir_program = None
self.places = []
self.skip_accuracy_verification = False
-self.pass_attr_list = [] # pass_name:pass_attr(defalut:None)
+self.pass_attr_list = [] # pass_name:pass_attr(default:None)

def run_pir_pass(self, program):
pm = pir.PassManager(opt_level=4)
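The corrected comment documents each entry as `pass_name:pass_attr(default:None)`; the same fix repeats in the three sibling pass_test.py files below. A hedged example of what a test might assign (pass names and attrs are illustrative, inferred from the comment rather than from these files):

# Each list entry maps one pass name to its attribute dict (or None for the
# default), matching the comment's pass_name:pass_attr(default:None) format.
pass_attr_list = [
    {"conv2d_bn_fuse_pass": None},
    {"matmul_scale_fuse_pass": {"use_cutlass": True}},
]
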
2 changes: 1 addition & 1 deletion test/ir/pir/fused_pass/pass_test.py
@@ -30,7 +30,7 @@ def setUpClass(self):
self.pir_program = None
self.places = []
self.skip_accuracy_verification = False
-self.pass_attr_list = [] # pass_name:pass_attr(defalut:None)
+self.pass_attr_list = [] # pass_name:pass_attr(default:None)

def run_pir_pass(self, program):
pm = pir.PassManager(opt_level=4)
2 changes: 1 addition & 1 deletion test/ir/pir/fused_pass/python/pass_test.py
@@ -30,7 +30,7 @@ def setUpClass(self):
self.pir_program = None
self.places = []
self.skip_accuracy_verification = False
-self.pass_attr_list = [] # pass_name:pass_attr(defalut:None)
+self.pass_attr_list = [] # pass_name:pass_attr(default:None)

def run_pir_pass(self, program):
pm = pir.PassManager(opt_level=4)
2 changes: 1 addition & 1 deletion test/ir/pir/fused_pass/xpu/pass_test.py
@@ -30,7 +30,7 @@ def setUpClass(self):
self.pir_program = None
self.places = []
self.skip_accuracy_verification = False
-self.pass_attr_list = [] # pass_name:pass_attr(defalut:None)
+self.pass_attr_list = [] # pass_name:pass_attr(default:None)

def run_pir_pass(self, program):
pm = pir.PassManager(opt_level=4)
4 changes: 2 additions & 2 deletions test/legacy_test/op.py
@@ -163,11 +163,11 @@ def __call__(self, *args, **kwargs):
raise NotImplementedError(
f"A not supported attribute type: {attr.type}."
)
-for attr_name, defalut_val in self.__extra_attrs__.items():
+for attr_name, default_val in self.__extra_attrs__.items():
user_defined_attr = kwargs.get(attr_name, None)
if user_defined_attr is not None:
attr_type = int(
-core.get_attrtibute_type(op_desc.type, attr_name)
+core.get_attribute_type(op_desc.type, attr_name)
)
new_attr = op_desc.attrs.add()
new_attr.name = attr_name
10 changes: 5 additions & 5 deletions test/sot/test_05_dict.py
@@ -175,7 +175,7 @@ def dict_test_fromkeys(x):


@check_no_breakgraph
-def dict_test_fromkeys_defalut(x, y):
+def dict_test_fromkeys_default(x, y):
d = dict.fromkeys(x, y)
return d

@@ -257,13 +257,13 @@ def test_dict_noargs(self):
def test_dict_fromkeys(self):
self.assert_results(dict_test_fromkeys, (1, 2, 3, 4))
self.assert_results(dict_test_fromkeys, [1, 2, 3, 4])
-self.assert_results(dict_test_fromkeys_defalut, (1, 2, 3, 4), 1)
+self.assert_results(dict_test_fromkeys_default, (1, 2, 3, 4), 1)
self.assert_results(
-dict_test_fromkeys_defalut, (1, 2, 3, 4), paddle.to_tensor(1)
+dict_test_fromkeys_default, (1, 2, 3, 4), paddle.to_tensor(1)
)
-self.assert_results(dict_test_fromkeys_defalut, [1, 2, 3, 4], 1)
+self.assert_results(dict_test_fromkeys_default, [1, 2, 3, 4], 1)
self.assert_results(
-dict_test_fromkeys_defalut, [1, 2, 3, 4], paddle.to_tensor(1)
+dict_test_fromkeys_default, [1, 2, 3, 4], paddle.to_tensor(1)
)

def test_dict_keyword_init(self):
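Beyond the rename, the behavior these tests exercise is worth a note: `dict.fromkeys(keys, value)` gives every key the same value object, which matters when the default is mutable. A quick standalone illustration:

d = dict.fromkeys((1, 2, 3, 4), 1)
print(d)  # {1: 1, 2: 1, 3: 1, 4: 1}

# All keys alias ONE list object, so a single append shows up everywhere.
shared = dict.fromkeys(["a", "b"], [])
shared["a"].append(1)
print(shared)  # {'a': [1], 'b': [1]}
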
