[CodeStyle][Typos][D-2,F-4,F-18,F-25,I-43,O-3,O-6,O-8,S-20,T-10,T-22,U-14,W-4,W-8,W-12,W-18] Ignore 1-3 letter words to reduce false positives #70623

Merged
49 changes: 7 additions & 42 deletions _typos.toml
@@ -4,45 +4,36 @@ extend-exclude = [
"third_party",
"patches",
"build",
"test/dataset/imikolov_test.py",
]

[default]
# Ignore 1-3 letter words, refer to https://github.com/crate-ci/typos/issues/1079
extend-ignore-words-re = ["^[a-zA-Z]{1,3}$"]

[default.extend-words]
# PaddlePaddle specific words
anc = 'anc'
arange = "arange"
astroid = 'astroid'
ba = 'ba'
cacl = 'cacl'
CANN = 'CANN'
Clas = 'Clas'
clen = 'clen'
cll = 'cll'
datas = 'datas'
dota = 'dota'
dout = "dout"
eles = 'eles'
entrys = 'entrys'
FUL = 'FUL'
feeded = 'feeded'
grad = "grad"
Halfs = 'Halfs'
hge = 'hge'
kinf = 'kinf'
kow = 'kow'
lod = "lod"
Nce = "Nce"
Nd = "Nd"
ot = 'ot'
pash = 'pash'
UE = "UE"
unpacket = "unpacket"

# These words need to be fixed
Creater = 'Creater'
creater = 'creater'
dateset = 'dateset'
dota = 'dota'
Datas = 'Datas'
DATAS = 'DATAS'
datas = 'datas'
delt = 'delt'
dealed = 'dealed'
deciamls = 'deciamls'
@@ -61,7 +52,6 @@ downsteram = 'downsteram'
faild = 'faild'
Flase = 'Flase'
featue = 'featue'
feeded = 'feeded'
fetchs = 'fetchs'
fileds = 'fileds'
filterd = 'filterd'
@@ -77,7 +67,6 @@ filpped = 'filpped'
flaot = 'flaot'
follwed = 'follwed'
folowing = 'folowing'
fot = 'fot'
formater = 'formater'
formating = 'formating'
foramt = 'foramt'
@@ -88,7 +77,6 @@ forwad = 'forwad'
forword = 'forword'
founf = 'founf'
framwork = 'framwork'
fre = 'fre'
frequence = 'frequence'
fron = 'fron'
fullfill = 'fullfill'
@@ -151,17 +139,11 @@ invalide = 'invalide'
iteratable = 'iteratable'
interated = 'interated'
Iteraion = 'Iteraion'
IIT = 'IIT'
iy = 'iy'
occured = 'occured'
Ocurred = 'Ocurred'
occures = 'occures'
Fo = 'Fo'
fo = 'fo'
offets = 'offets'
offseted = 'offseted'
OLT = 'OLT'
pn = 'pn'
Operants = 'Operants'
operants = 'operants'
oeprations = 'oeprations'
@@ -323,10 +305,7 @@ sequnece = 'sequnece'
sequentail = 'sequentail'
serailize = 'serailize'
settting = 'settting'
ser = 'ser'
setted = 'setted'
SEH = 'SEH'
seh = 'seh'
shoule = 'shoule'
shoud = 'shoud'
Singal = 'Singal'
@@ -396,14 +375,9 @@ Temperary = 'Temperary'
tenosr = 'tenosr'
iterm = 'iterm'
termiante = 'termiante'
ths = 'ths'
thi = 'thi'
Theoritical = 'Theoritical'
ther = 'ther'
thr = 'thr'
thge = 'thge'
Teh = 'Teh'
tne = 'tne'
thouse = 'thouse'
theads = 'theads'
thrads = 'thrads'
@@ -416,7 +390,6 @@ TOOD = 'TOOD'
tood = 'tood'
TDOD = 'TDOD'
toghether = 'toghether'
Rto = 'Rto'
trainning = 'trainning'
Traning = 'Traning'
transforme = 'transforme'
@@ -460,7 +433,6 @@ unziped = 'unziped'
udpated = 'udpated'
upgarde = 'upgarde'
uptream = 'uptream'
ues = 'ues'
unsed = 'unsed'
uesd = 'uesd'
usefull = 'usefull'
@@ -481,27 +453,20 @@ varn = 'varn'
warpped = 'warpped'
warpper = 'warpper'
Warpper = 'Warpper'
vas = 'vas'
wheather = 'wheather'
wether = 'wether'
Wether = 'Wether'
wieghts = 'wieghts'
werid = 'werid'
whe = 'whe'
Wheter = 'Wheter'
whther = 'whther'
whill = 'whill'
whos = 'whos'
wil = 'wil'
wll = 'wll'
wiil = 'wiil'
witk = 'witk'
Wih = 'Wih'
wih = 'wih'
worke = 'worke'
workround = 'workround'
worksapce = 'worksapce'
wrk = 'wrk'
wrappered = 'wrappered'
wraper = 'wraper'
wraping = 'wraping'
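For reference, the new `extend-ignore-words-re` pattern above suppresses any whole word of one to three ASCII letters, which is why short entries such as `fo`, `wll`, and `ues` could be dropped from the list. A minimal sketch of the matching behavior (plain Python, illustrative only):

import re

# Same regex as the extend-ignore-words-re entry in _typos.toml.
pattern = re.compile(r"^[a-zA-Z]{1,3}$")

# 1-3 letter tokens are now ignored wholesale, so they need no entries:
for word in ["fo", "wll", "ues", "wih", "SEH"]:
    assert pattern.fullmatch(word) is not None

# Longer misspellings are still flagged and must stay in the list:
for word in ["feeded", "wheather", "occured"]:
    assert pattern.fullmatch(word) is None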
@@ -434,7 +434,7 @@ void TensorRTEngineInstruction::BindInputTensor(
"have >0 elements, but now have %d elements. "
"It's likely that this tensor is connected to a Concat op inside "
"a trt-subgraph, "
"try to ues API to forbid this op into trt-subgraph.",
"try to use API to forbid this op into trt-subgraph.",
input_name,
input_tensor.numel()));

2 changes: 1 addition & 1 deletion paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
@@ -593,7 +593,7 @@ class TensorRTEngineOp : public framework::OperatorBase {
"have >0 elements, but now have %d elements. "
"It's likely that this tensor is connected to a Concat op inside "
"a trt-subgraph, "
"try to ues API to forbid this op into trt-subgraph.",
"try to use API to forbid this op into trt-subgraph.",
x,
t.numel()));

2 changes: 1 addition & 1 deletion paddle/phi/core/memory/allocation/allocator_facade.cc
@@ -825,7 +825,7 @@ class AllocatorFacadePrivate {
void InitNaiveBestFitCPUAllocator() {
#if defined(__APPLE__) && defined(__arm64__)
// NOTE(wuweilong): It is more efficient to use CPUAllocator directly,
// but it wll cause some problem in Mac OS m1 chip, so we use
// but it will cause some problem in Mac OS m1 chip, so we use
// NaiveBestFitAllocator instead.
allocators_[phi::CPUPlace()] =
std::make_shared<NaiveBestFitAllocator>(phi::CPUPlace());
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/blas/blas.h
@@ -42,7 +42,7 @@ namespace funcs {
* `batch_size` times of GEMM. The batched GEMM could be faster base on the
* implementation of the blas library. The batch size could be zero. If any
* matrix of `matmul` has a batch size, there will be a batched GEMM, too. e.g.,
* Mat A is [BatchSize, H1, W2], and Mat B [H2, W2], The result matrix wil be
* Mat A is [BatchSize, H1, W2], and Mat B [H2, W2], The result matrix will be
* [BatchSize, H1, W2]
*
* The boolean flag, `trans`, describe the memory is the transpose of matrix or
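The comment above describes the broadcast rule for batched GEMM. As a worked sketch (NumPy, and assuming the intended shapes are A: [BatchSize, H1, H2] and B: [H2, W2], since A's last dimension must match B's first):

import numpy as np

batch_size, H1, H2, W2 = 4, 3, 5, 2
A = np.random.rand(batch_size, H1, H2)  # batched matrix
B = np.random.rand(H2, W2)              # single matrix, shared across the batch

# B is broadcast against every A[i] in one batched GEMM,
# giving a [BatchSize, H1, W2] result as the comment states.
C = A @ B
assert C.shape == (batch_size, H1, W2)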
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/interface.py
@@ -43,7 +43,7 @@ def shard_tensor(x, process_mesh=None, shard_spec=None):
current process mesh cannot be found. Default: None.
shard_spec (list, optional): a list to describe the sharding mapping between `x` and `process_mesh`,
which means the dimension `i` of `x` is split across the dimension `shard_spec[i]` of `process_mesh`,
where `None` means that tensor dimension is not split. For example, given a tensor wih
where `None` means that tensor dimension is not split. For example, given a tensor with
the shape [6, 12] and a process mesh with the shape [2, 3] and the dimension names ["x", "y"]:
If `shard_spec=["x", "y"]`, each shard of the tensor will have a shape [3, 4];
If `shard_spec=["y", "x"]`, each shard of the tensor will have a shape [2, 6];
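The docstring's [6, 12] example corresponds to a call like the following (a sketch based on the signature shown above; the mesh construction via `paddle.distributed.ProcessMesh` is an assumption, and exact import paths may differ across Paddle versions):

import paddle
import paddle.distributed as dist
from paddle.distributed.auto_parallel.interface import shard_tensor

# A [2, 3] process mesh with dimension names "x" and "y".
mesh = dist.ProcessMesh([[0, 1, 2], [3, 4, 5]], dim_names=["x", "y"])

x = paddle.zeros([6, 12])
# Split dim 0 of x across mesh dim "x" and dim 1 across "y";
# each process then holds a [3, 4] shard.
x = shard_tensor(x, process_mesh=mesh, shard_spec=["x", "y"])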
2 changes: 1 addition & 1 deletion python/paddle/io/dataloader/dataloader_iter.py
@@ -590,7 +590,7 @@ def _try_shutdown_all(self, timeout=None):
self._clear_and_remove_data_queue()

# set _workers_done_event should be set before put None
# to indices_queue, workers wll exit on reading None from
# to indices_queue, workers will exit on reading None from
# indices_queue
self._workers_done_event.set()
for i in range(self._num_workers):
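The ordering described in that comment matters: the done event is set before the None sentinels are queued, so a worker that wakes for any reason still observes shutdown instead of blocking on the queue again. A generic sketch of the same sentinel pattern (illustrative, not Paddle's actual worker code):

import multiprocessing as mp

def worker_loop(indices_queue, done_event):
    while True:
        indices = indices_queue.get()  # blocks until data or a sentinel arrives
        if indices is None or done_event.is_set():
            break                      # sentinel or shutdown: exit cleanly
        # ... fetch and return samples for `indices` ...

def shutdown(workers, indices_queue, done_event):
    done_event.set()                   # set *before* queueing sentinels
    for _ in workers:
        indices_queue.put(None)        # one sentinel per worker
    for w in workers:
        w.join()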