[CodeStyle][Typos][C-[39-41],C-[43-47]] Fix typos (configed,`configurated`,`conficte`,`conjuction`,`consequtive`,`consistant`,`contraints`,`contruction`,`consructor`) (#69855)
rich04lin authored Dec 2, 2024
1 parent 1cc1d8b commit 4409040
Showing 11 changed files with 13 additions and 22 deletions.
9 changes: 0 additions & 9 deletions _typos.toml
@@ -71,15 +71,6 @@ comple = 'comple'
complition = 'complition'
complext = 'complext'
compsite = 'compsite'
-configurated = 'configurated'
-configed = 'configed'
-confict = 'confict'
-conjuction = 'conjuction'
-consequtive = 'consequtive'
-consistant = 'consistant'
-contraints = 'contraints'
-contruction = 'contruction'
-consructor = 'consructor'
comsume = 'comsume'
Continer = 'Continer'
contenst = 'contenst'
6 changes: 3 additions & 3 deletions paddle/cinn/ir/group_schedule/search/config_searcher.cc
@@ -202,15 +202,15 @@ bool CandidateGenerator::IsValid(const CandidateType& candidate) const {
ScheduleConfigSearcher::ScheduleConfigSearcher(
std::vector<std::unique_ptr<BaseObjectiveFunc>> objective_funcs,
const std::vector<std::pair<int, int>>& candidate_range,
-const std::vector<ConstraintFunc>& contraints)
+const std::vector<ConstraintFunc>& constraints)
: objective_funcs_(std::move(objective_funcs)),
candidate_range_(candidate_range),
-contraints_(contraints) {}
+constraints_(constraints) {}

std::pair<ScoreType, CandidateType> ScheduleConfigSearcher::Search(
bool is_search_minimun) {
VLOG(6) << "Start Search...";
-CandidateGenerator candidate_generator(candidate_range_, contraints_);
+CandidateGenerator candidate_generator(candidate_range_, constraints_);
std::vector<CandidateType> candidates = candidate_generator.Candidates();
VLOG(6) << "Candidate num = " << candidates.size();
for (const auto& candidate : candidates) {
4 changes: 2 additions & 2 deletions paddle/cinn/ir/group_schedule/search/config_searcher.h
@@ -85,13 +85,13 @@ class ScheduleConfigSearcher {
ScheduleConfigSearcher(
std::vector<std::unique_ptr<BaseObjectiveFunc>> objective_funcs,
const std::vector<std::pair<int, int>>& candidate_range,
-const std::vector<ConstraintFunc>& contraints = {});
+const std::vector<ConstraintFunc>& constraints = {});

std::pair<ScoreType, CandidateType> Search(bool is_search_minimun = true);

private:
std::vector<std::unique_ptr<BaseObjectiveFunc>> objective_funcs_;
-std::vector<ConstraintFunc> contraints_;
+std::vector<ConstraintFunc> constraints_;
std::vector<std::pair<int, int>> candidate_range_;

std::map<ScoreType, CandidateType> records_;
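
For reference, the constructor renamed in this file takes a set of `ConstraintFunc`s that prune candidates before they are scored. A minimal, self-contained sketch of that search pattern follows; the names and types are illustrative stand-ins, not CINN's actual `BaseObjectiveFunc`/`ConstraintFunc`/`CandidateType` API.

```cpp
// Sketch only: enumerate 2-D candidates from inclusive ranges, drop any that
// violate a constraint, and keep the lowest-scoring one.
#include <functional>
#include <limits>
#include <utility>
#include <vector>

using Candidate = std::vector<int>;
using Constraint = std::function<bool(const Candidate&)>;
using Objective = std::function<double(const Candidate&)>;

std::pair<double, Candidate> SearchMin(
    const std::vector<std::pair<int, int>>& ranges,  // assumes two ranges
    const std::vector<Constraint>& constraints,
    const Objective& objective) {
  double best_score = std::numeric_limits<double>::max();
  Candidate best;
  for (int x = ranges[0].first; x <= ranges[0].second; ++x) {
    for (int y = ranges[1].first; y <= ranges[1].second; ++y) {
      Candidate c{x, y};
      bool ok = true;
      for (const auto& is_valid : constraints) ok = ok && is_valid(c);
      if (!ok) continue;  // candidate violates at least one constraint
      double score = objective(c);
      if (score < best_score) {
        best_score = score;
        best = c;
      }
    }
  }
  return {best_score, best};
}
```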
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/thirdparty/round_robin.h
@@ -202,7 +202,7 @@ static Counts &counts() {
#define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1
#endif

-// detect if MSVC supports the pair(std::piecewise_construct_t,...) consructor
+// detect if MSVC supports the pair(std::piecewise_construct_t,...) constructor
// being constexpr
#ifdef _MSC_VER
#if _MSC_VER <= 1900
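
The comment fixed here refers to `std::pair`'s piecewise constructor. For readers unfamiliar with it, plain (non-MSVC-specific) usage looks roughly like this; the helper function is purely illustrative.

```cpp
#include <string>
#include <tuple>
#include <utility>

std::pair<std::string, int> MakeEntry() {
  // Builds both members in place: the string from (3, 'x') -> "xxx",
  // the int from 42. This is the pair(std::piecewise_construct_t, ...)
  // constructor whose constexpr support the robin_hood check probes on
  // old MSVC versions.
  return std::pair<std::string, int>(std::piecewise_construct,
                                     std::forward_as_tuple(3, 'x'),
                                     std::forward_as_tuple(42));
}
```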
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fusion_group/subgraph.h
@@ -123,7 +123,7 @@ class SubGraph {
}

std::vector<Node*> GetOutputVarNodes(bool with_intermediate_out) {
-// The order of output nodes should be consistant anywhere..
+// The order of output nodes should be consistent anywhere..
std::vector<Node*> output_vars;
for (auto* n : SortedNodes()) {
if (IsOutputOfInternalOp(n)) {
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/spmd_rules/utils.cc
@@ -106,7 +106,7 @@ std::unordered_map<std::string, int64_t> ShardingMergeForTensors(
}
}

-// Resolute "mesh_dim shard by more than one axis" confict.
+// Resolute "mesh_dim shard by more than one axis" conflict.
// Now we just naive pick the first axis naively.
// (TODO) use local cost model to pick the axis with lowest cost(in concern of
// memory or communication or computation).
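
As a rough illustration of the conflict resolution that comment describes — not the actual `ShardingMergeForTensors` logic — the idea is: if two tensor axes claim the same mesh dimension, the first claimant keeps it. Using `-1` to mean "replicated" is an assumption of this sketch.

```cpp
#include <cstdint>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Hypothetical helper: later axes that claim an already-used mesh dimension
// are reset to replicated (-1, by assumption).
std::vector<std::pair<std::string, int64_t>> ResolveMeshDimConflicts(
    std::vector<std::pair<std::string, int64_t>> axis_to_mesh_dim) {
  std::set<int64_t> used;
  for (auto& kv : axis_to_mesh_dim) {
    int64_t& mesh_dim = kv.second;
    if (mesh_dim < 0) continue;  // already replicated
    if (!used.insert(mesh_dim).second) mesh_dim = -1;  // later claimant loses
  }
  return axis_to_mesh_dim;
}
```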
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/dims_simplifier.h
@@ -52,7 +52,7 @@ struct BroadcastDimsSimplifier {
}
ExtendInputDimensions(axis);

-// To Merge the dimensions of input_tensors while the consequtive
+// To Merge the dimensions of input_tensors while the consecutive
// equal-dimensions appears. Example below :
// in_1.shape = [2, 3, 4, 5] in_1.shape = [2, 12, 5]
// in_2.shape = [1, 3, 4, 5] -> in_2.shape = [1, 12, 5]
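
The comment being fixed describes fusing runs of adjacent dimensions whose broadcast pattern does not change. Below is a simplified, self-contained sketch of that idea, not the actual `BroadcastDimsSimplifier`: it fuses maximal runs of axes with an identical broadcast pattern across all inputs, so with only the two inputs visible in this hunk it would fuse all of 3×4×5; the shapes shown in the original comment are truncated by the diff and presumably involve further inputs.

```cpp
#include <cstdint>
#include <vector>

// Sketch: adjacent dims d and d+1 can be fused when every input has the same
// broadcast pattern on both (either extent 1 against a larger output extent,
// or the full output extent). Returns the merged input shapes plus, as the
// last entry, the merged output shape.
std::vector<std::vector<int64_t>> MergeConsecutiveDims(
    const std::vector<std::vector<int64_t>>& ins,
    const std::vector<int64_t>& out) {
  std::vector<std::vector<int64_t>> merged(ins.size());
  std::vector<int64_t> merged_out;
  size_t d = 0;
  while (d < out.size()) {
    size_t e = d + 1;
    for (; e < out.size(); ++e) {
      bool fusable = true;
      for (const auto& s : ins) {
        bool lhs_bcast = (s[e - 1] == 1 && out[e - 1] != 1);
        bool rhs_bcast = (s[e] == 1 && out[e] != 1);
        if (lhs_bcast != rhs_bcast) { fusable = false; break; }
      }
      if (!fusable) break;  // broadcast pattern changes at axis e
    }
    for (size_t i = 0; i < ins.size(); ++i) {
      int64_t prod = 1;
      for (size_t k = d; k < e; ++k) prod *= ins[i][k];
      merged[i].push_back(prod);
    }
    int64_t out_prod = 1;
    for (size_t k = d; k < e; ++k) out_prod *= out[k];
    merged_out.push_back(out_prod);
    d = e;
  }
  merged.push_back(merged_out);
  return merged;
}
```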
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/rms_norm_kernel.cu
@@ -568,7 +568,7 @@ inline GPU(Error_t)
}
}

-static const bool max_smem_configed = [=]() {
+static const bool max_smem_configured = [=]() {
int max_smem_size = 0;
GPU(Error_t)
err = GPU(DeviceGetAttribute)(
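
The `GPU(...)` macros in this file appear to abstract over CUDA and ROCm; on a CUDA build, the cached attribute query in this hunk boils down to something like the sketch below. The function name, the device-0 default, and the specific attribute are illustrative assumptions, not the kernel's exact logic.

```cpp
#include <cuda_runtime.h>

// Query the opt-in max shared memory per block once for the current device
// and cache it in a function-local static, mirroring the static-lambda
// pattern in the hunk above.
int MaxOptinSharedMemoryPerBlock() {
  static const int max_smem_size = []() {
    int device = 0;
    cudaGetDevice(&device);
    int value = 0;
    cudaError_t err = cudaDeviceGetAttribute(
        &value, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
    return err == cudaSuccess ? value : 0;
  }();
  return max_smem_size;
}
```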
2 changes: 1 addition & 1 deletion paddle/utils/tribool.h
@@ -168,7 +168,7 @@ inline tribool operator!(tribool x) {
}

/**
-* \brief Computes the logical conjuction of two tribools
+* \brief Computes the logical conjunction of two tribools
*
* \returns the result of logically ANDing the two tribool values,
* according to the following table:
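
The conjunction documented here follows the usual Boost.Tribool-style three-valued (Kleene) rules: false dominates, true && true is true, and everything else is indeterminate. A tiny stand-in sketch with a plain enum, not `paddle::tribool` itself:

```cpp
#include <cassert>

enum class Tri { False, True, Indeterminate };

// Three-valued AND: false wins over everything, true && true is true,
// any combination involving indeterminate (and no false) is indeterminate.
constexpr Tri And(Tri a, Tri b) {
  if (a == Tri::False || b == Tri::False) return Tri::False;
  if (a == Tri::True && b == Tri::True) return Tri::True;
  return Tri::Indeterminate;
}

int main() {
  assert(And(Tri::True, Tri::Indeterminate) == Tri::Indeterminate);
  assert(And(Tri::False, Tri::Indeterminate) == Tri::False);
  assert(And(Tri::True, Tri::True) == Tri::True);
  return 0;
}
```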
2 changes: 1 addition & 1 deletion test/cpp/phi/kernels/sequence_pooling_test.cc
@@ -52,7 +52,7 @@ void TestSequencePoolingSum(const DeviceContext &context,
common::make_ddim({static_cast<int64_t>(lod[0].back()), second_dim});
in_grad.mutable_data<T>(in_dims, place);

-// check tensor contruction result
+// check tensor construction result
PADDLE_ENFORCE_EQ(
in_grad.dims().size(),
out_grad.dims().size(),
2 changes: 1 addition & 1 deletion test/deprecated/book/test_recommender_system_deprecated.py
@@ -38,7 +38,7 @@

def get_usr_combined_features():
# FIXME(dzh) : old API integer_value(10) may has range check.
-# currently we don't have user configurated check.
+# currently we don't have user configured check.

USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1

