[CodeStyle][Typos][I-11] Fix typos(indexs,Indexs) (part 1) #70809

Merged 6 commits on Jan 15, 2025

Changes from all commits

1 change: 0 additions & 1 deletion _typos.toml
@@ -45,7 +45,6 @@ pash = 'pash'
 unpacket = "unpacket"

 # These words need to be fixed
-Indexs = 'Indexs'
 indexs = 'indexs'
 Infered = 'Infered'
 infered = 'infered'
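
Note on the config change: the entries under "These words need to be fixed" in `_typos.toml` act as a temporary allowlist for the typos checker, so deleting `Indexs = 'Indexs'` means CI will now flag any remaining occurrence of `Indexs`. The lowercase `indexs = 'indexs'` entry stays for now, since this is only part 1 of the cleanup.
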
6 changes: 3 additions & 3 deletions paddle/cinn/adt/print_utils/print_equations.cc
@@ -206,10 +206,10 @@ struct ToTxtStringStruct {
       tOut<OpArgIndexes<std::optional<Index>>>,
       tIn<OpArgIndexes<Index>>>& in_msg2out_msg) const {
     std::string ret;
-    const auto& [out_op, out_indexs, in_indexs] = in_msg2out_msg.tuple();
+    const auto& [out_op, out_indices, in_indices] = in_msg2out_msg.tuple();
     const FakeOpPlaceHolder& op = out_op.value();
-    const auto& out_index_tuple = out_indexs.value();
-    const auto& in_index_tuple = in_indexs.value();
+    const auto& out_index_tuple = out_indices.value();
+    const auto& in_index_tuple = in_indices.value();
     const auto& [out_msg_list_in, out_msg_list_out] = out_index_tuple.tuple();
     const auto& [in_msg_list_in, in_msg_list_out] = in_index_tuple.tuple();
     ret += ToTxtString(op) + ", ";
@@ -106,7 +106,7 @@ Equation EraseIndexes(
   return ret_equation;
 }

-std::vector<Index> GenerateWriteBroadcastTensorIndexs(
+std::vector<Index> GenerateWriteBroadcastTensorIndices(
     const std::shared_ptr<config::NaiveOpEquationContext>& ctx,
     const Equations& in_msg2out_msg_equations) {
   const auto& equation_graph_view =
@@ -137,10 +137,10 @@ WriteBroadcastDisabledBidirectionEquationGenerator::GetDirectionEquations()
     const std::shared_ptr<config::NaiveOpEquationContext>& ctx) {
   const auto& in_msg2out_msg_equations =
       naive_bidirection_equation_generator_.equations();
-  const auto& truncated_output_tensor_idxes =
-      GenerateWriteBroadcastTensorIndexs(ctx, in_msg2out_msg_equations);
+  const auto& truncated_output_tensor_indices =
+      GenerateWriteBroadcastTensorIndices(ctx, in_msg2out_msg_equations);
   ret->emplace_back(EraseIndexes(in_msg2out_msg_equations->at(idx),
-                                 truncated_output_tensor_idxes));
+                                 truncated_output_tensor_indices));
   });
   return ret;
 }
6 changes: 3 additions & 3 deletions paddle/cinn/hlir/framework/pir/op_mapper.h
@@ -33,7 +33,7 @@ enum MapperType {
 };

 class OpMapper {
-  using OperandIndexsFunction = std::function<std::vector<size_t>()>;
+  using OperandIndicesFunction = std::function<std::vector<size_t>()>;
   using AppendAttrFunction =
       std::function<void(const ::pir::Operation& op,
                          utils::AttributeMap& attrs)>;  // NOLINT
@@ -59,7 +59,7 @@ class OpMapper {
         has(op, MapperType::OPERAND),
         true,
         ::common::errors::PreconditionNotMet(
-            "Not register OperandIndexsFunction for %s", op.name().c_str()));
+            "Not register OperandIndicesFunction for %s", op.name().c_str()));
     std::vector<::pir::Value> inputs;
     for (auto idx : operand_funcs_.at(op.name())()) {
       inputs.push_back(op.operand_source(idx));
@@ -81,7 +81,7 @@
   OpMapper() { RegisterMapRules(); }
   void RegisterMapRules();

-  std::unordered_map<std::string, OperandIndexsFunction> operand_funcs_;
+  std::unordered_map<std::string, OperandIndicesFunction> operand_funcs_;
   std::unordered_map<std::string, AppendAttrFunction> attr_funcs_;
 };
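
To make the renamed alias concrete, here is a minimal, self-contained sketch of the registration-and-lookup pattern `OpMapper` uses above; the op name "custom_op" and the index list are hypothetical, and the real class wires rules up through `RegisterMapRules()` rather than in `main`:

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Same shape as the renamed alias in op_mapper.h.
using OperandIndicesFunction = std::function<std::vector<size_t>()>;

int main() {
  std::unordered_map<std::string, OperandIndicesFunction> operand_funcs;
  // Hypothetical rule: "custom_op" maps only operand slots 0 and 2 as inputs.
  operand_funcs["custom_op"] = [] { return std::vector<size_t>{0, 2}; };
  // Lookup mirrors OpMapper: fetch the selected operand slots for the op.
  for (size_t idx : operand_funcs.at("custom_op")()) {
    // In the real code each idx feeds op.operand_source(idx).
    std::cout << "input from operand slot " << idx << "\n";
  }
  return 0;
}
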
6 changes: 3 additions & 3 deletions paddle/cinn/hlir/pe/elementwise.cc
@@ -163,11 +163,11 @@ ir::Tensor Squeeze(const ir::Tensor& A,
   auto res = Compute(
       output_shape,
       [=](const std::vector<Expr>& indices) {
-        std::vector<Expr> indexs(A->shape.size(), Expr(0));
+        std::vector<Expr> out_indices(A->shape.size(), Expr(0));
         for (int idx = 0; idx < indices.size(); ++idx) {
-          indexs[position[idx]] = indices[idx];
+          out_indices[position[idx]] = indices[idx];
         }
-        return A(indexs);
+        return A(out_indices);
       },
       output_name);
   return res;
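
The Squeeze lambda above scatters each output index back to its original input axis and pins the squeezed axes at 0. A plain-integer sketch of that mapping (using `int` in place of `ir::Expr`; the function name is hypothetical):

#include <cstddef>
#include <vector>

// Map an output index of the squeezed tensor back to an input index.
// position[i] is the input axis that output axis i came from.
std::vector<int> SqueezeInputIndex(const std::vector<int>& out_indices,
                                   const std::vector<int>& position,
                                   std::size_t input_rank) {
  std::vector<int> in_indices(input_rank, 0);  // squeezed axes stay at 0
  for (std::size_t i = 0; i < out_indices.size(); ++i) {
    in_indices[position[i]] = out_indices[i];
  }
  return in_indices;
}

// Example: input shape [3, 1, 4] squeezed to [3, 4] gives position = {0, 2};
// output index {2, 3} maps to input index {2, 0, 3}.
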
125 changes: 63 additions & 62 deletions paddle/cinn/hlir/pe/reduction.cc
@@ -330,17 +330,17 @@ std::vector<Tensor> WarpReduce(const ir::Tensor& A,
   tmp_shape.push_back(Expr(32));
   auto tmp_out = Compute(
       tmp_shape,
-      [=](const std::vector<Expr>& indexs) -> Expr {
-        std::vector<Expr> tmp_indexs(indexs.begin(),
-                                     indexs.begin() + indexs.size() - 1);
+      [=](const std::vector<Expr>& indices) -> Expr {
+        std::vector<Expr> tmp_indices(indices.begin(),
+                                      indices.begin() + indices.size() - 1);
         for (int idx = 0; idx < last_reduce_dim_num; ++idx) {
-          tmp_indexs.push_back(Expr(0));
+          tmp_indices.push_back(Expr(0));
         }
         PADDLE_ENFORCE_EQ(A->shape.size(),
-                          tmp_indexs.size(),
+                          tmp_indices.size(),
                           ::common::errors::InvalidArgument(
-                              "Indexs size is not equal to Input shape!"));
-        Expr offset = cinn::common::IndiceToAbsOffset(A->shape, tmp_indexs);
+                              "indices size is not equal to Input shape!"));
+        Expr offset = cinn::common::IndiceToAbsOffset(A->shape, tmp_indices);
         return lang::CallExtern(reduce_type, {A, offset, reduce_width});
       },
       UniqName(output_name + "_" + reduce_type));
@@ -357,11 +357,11 @@ std::vector<Tensor> WarpReduce(const ir::Tensor& A,
   }
   auto out = Compute(
       out_shape,
-      [=](const std::vector<Expr>& indexs) -> Expr {
-        std::vector<Expr> tmp_indexs(
-            indexs.begin(), indexs.begin() + shape_size_without_reduce_dim);
-        tmp_indexs.push_back(Expr(0));
-        return tmp_out(tmp_indexs);
+      [=](const std::vector<Expr>& indices) -> Expr {
+        std::vector<Expr> tmp_indices(
+            indices.begin(), indices.begin() + shape_size_without_reduce_dim);
+        tmp_indices.push_back(Expr(0));
+        return tmp_out(tmp_indices);
       },
       output_name);

@@ -431,22 +431,22 @@ std::vector<ir::Tensor> BlockReduceInternal(const ir::Tensor& A,

   auto tmp_out = Compute(
       tmp_shape,
-      [=](const std::vector<Expr>& indexs) -> Expr {
+      [=](const std::vector<Expr>& indices) -> Expr {
         // compute index map from output to input.
-        auto last_index = indexs.back();
-        std::vector<Expr> input_indexs(indexs.begin(),
-                                       indexs.begin() + indexs.size() - 1);
+        auto last_index = indices.back();
+        std::vector<Expr> input_indices(indices.begin(),
+                                        indices.begin() + indices.size() - 1);
         for (int idx = 0; idx < A->shape.size() - axes.front(); ++idx) {
-          input_indexs.push_back(last_index / last_reduce_stride[idx]);
+          input_indices.push_back(last_index / last_reduce_stride[idx]);
           last_index = last_index % last_reduce_stride[idx];
         }

-        // checkout input_indexs size equals input shape
-        PADDLE_ENFORCE_EQ(input_indexs.size(),
+        // checkout input_indices size equals input shape
+        PADDLE_ENFORCE_EQ(input_indices.size(),
                           A->shape.size(),
                           ::common::errors::InvalidArgument(
-                              "Indexs size is not equal to Input shape!"));
-        return lang::CallExtern(reduce_type, {A(input_indexs)});
+                              "indices size is not equal to Input shape!"));
+        return lang::CallExtern(reduce_type, {A(input_indices)});
       },
       UniqName(output_name + "_tmp"));

@@ -464,11 +464,11 @@ std::vector<ir::Tensor> BlockReduceInternal(const ir::Tensor& A,
   }
   auto out = Compute(
       out_shape,
-      [=](const std::vector<Expr>& indexs) -> Expr {
-        std::vector<Expr> tmp_indexs(indexs.begin(),
-                                     indexs.begin() + axes.front());
-        tmp_indexs.push_back(Expr(0));
-        return tmp_out(tmp_indexs);
+      [=](const std::vector<Expr>& indices) -> Expr {
+        std::vector<Expr> tmp_indices(indices.begin(),
+                                      indices.begin() + axes.front());
+        tmp_indices.push_back(Expr(0));
+        return tmp_out(tmp_indices);
       },
       output_name);
   return {out, tmp_out};
@@ -566,19 +566,19 @@ std::vector<ir::Tensor> BlockReduce(const ir::Tensor& A,
   tmp_shape.push_back(Expr(block_size));
   auto tmp_out = Compute(
       tmp_shape,
-      [=](const std::vector<Expr>& indexs) -> Expr {
-        std::vector<Expr> tmp_indexs(indexs.begin(),
-                                     indexs.begin() + axes.front());
+      [=](const std::vector<Expr>& indices) -> Expr {
+        std::vector<Expr> tmp_indices(indices.begin(),
+                                      indices.begin() + axes.front());
         for (int idx = 0; idx < A->shape.size() - axes.front(); ++idx) {
-          tmp_indexs.push_back(Expr(0));
+          tmp_indices.push_back(Expr(0));
         }
-        // checkout input shape size equals tmp indexs size.
+        // checkout input shape size equals tmp indices size.
         PADDLE_ENFORCE_EQ(A->shape.size(),
-                          tmp_indexs.size(),
+                          tmp_indices.size(),
                           ::common::errors::InvalidArgument(
-                              "Indexs size is not equal to Input shape!"));
+                              "indices size is not equal to Input shape!"));
         // compute offset.
-        Expr offset = cinn::common::IndiceToAbsOffset(A->shape, tmp_indexs);
+        Expr offset = cinn::common::IndiceToAbsOffset(A->shape, tmp_indices);
         // call block reduce sum
         return lang::CallExtern(reduce_type, {A, offset, reduce_width});
       },
@@ -598,12 +598,12 @@ std::vector<ir::Tensor> BlockReduce(const ir::Tensor& A,
   }
   auto out = Compute(
       out_shape,
-      [=](const std::vector<Expr>& indexs) -> Expr {
+      [=](const std::vector<Expr>& indices) -> Expr {
         // compute input index
-        std::vector<Expr> tmp_indexs(indexs.begin(),
-                                     indexs.begin() + axes.front());
-        tmp_indexs.push_back(Expr(0));
-        return tmp_out(tmp_indexs);
+        std::vector<Expr> tmp_indices(indices.begin(),
+                                      indices.begin() + axes.front());
+        tmp_indices.push_back(Expr(0));
+        return tmp_out(tmp_indices);
       },
       output_name);

@@ -756,32 +756,33 @@ std::vector<ir::Tensor> ReduceInternal(const ir::Tensor& A,
                  [](int val) { return ir::Expr(val); });
   return Compute(
       reshape_output_shape,
-      [=](const std::vector<Expr>& indexs) -> Expr {
+      [=](const std::vector<Expr>& indices) -> Expr {
         // index is last axis in axes and index is last axis >= tail.
         auto selected = ir::And::Make(
-            ir::EQ::Make(indexs[axis], ir::Expr(reduce_shape[axis] - 1)),
-            ir::GE::Make(indexs[axis + 1], ir::Expr(tail)));
-        auto index = indexs[axis] * ir::Expr(reshape_output_shape[axis + 1]) +
-                     indexs[axis + 1];
+            ir::EQ::Make(indices[axis], ir::Expr(reduce_shape[axis] - 1)),
+            ir::GE::Make(indices[axis + 1], ir::Expr(tail)));
+        auto index =
+            indices[axis] * ir::Expr(reshape_output_shape[axis + 1]) +
+            indices[axis + 1];

         // first part index
-        std::vector<ir::Expr> tmp_indexs(indexs.begin(),
-                                         indexs.begin() + axes[axis_index]);
+        std::vector<ir::Expr> tmp_indices(indices.begin(),
+                                          indices.begin() + axes[axis_index]);
         // second part index
         for (int idx = 0; idx < strides.size(); ++idx) {
-          tmp_indexs.push_back(index / strides[idx]);
+          tmp_indices.push_back(index / strides[idx]);
           index = index % strides[idx];
         }
         // third part index
-        for (int idx = axis + 2; idx < indexs.size(); ++idx) {
-          tmp_indexs.push_back(indexs[idx]);
+        for (int idx = axis + 2; idx < indices.size(); ++idx) {
+          tmp_indices.push_back(indices[idx]);
         }

-        PADDLE_ENFORCE_EQ(tmp_indexs.size(),
+        PADDLE_ENFORCE_EQ(tmp_indices.size(),
                           A->shape.size(),
                           ::common::errors::InvalidArgument(
-                              "Indexs size is not equal to Input shape!"));
-        return ir::Select::Make(selected, A(tmp_indexs), initial);
+                              "indices size is not equal to Input shape!"));
+        return ir::Select::Make(selected, A(tmp_indices), initial);
       },
       UniqName(output_name + "_reshape"));
 };
@@ -1001,27 +1002,27 @@ std::vector<ir::Tensor> TwoStepBlockReduceInternal(
   }
   auto out = Compute(
       out_shape,
-      [=](const std::vector<Expr>& indexs) -> Expr {
+      [=](const std::vector<Expr>& indices) -> Expr {
         Expr index =
-            indexs[size_without_tail - 1] +
-            indexs[size_without_tail - 2] * out_shape[size_without_tail - 1];
-        std::vector<Expr> tmp_indexs(indexs.begin(),
-                                     indexs.begin() + size_without_tail - 2);
+            indices[size_without_tail - 1] +
+            indices[size_without_tail - 2] * out_shape[size_without_tail - 1];
+        std::vector<Expr> tmp_indices(
+            indices.begin(), indices.begin() + size_without_tail - 2);
         // last and the second of last.
         auto selected = ir::LT::Make(index, Expr(lane));
         for (auto tail_stride : tail_strides) {
-          tmp_indexs.push_back(index / Expr(tail_stride));
+          tmp_indices.push_back(index / Expr(tail_stride));
           index = index % Expr(tail_stride);
         }

-        PADDLE_ENFORCE_EQ(tmp_indexs.size(),
+        PADDLE_ENFORCE_EQ(tmp_indices.size(),
                           A->shape.size(),
                           ::common::errors::InvalidArgument(
-                              "Indexs size is not equal to Input shape!"));
+                              "indices size is not equal to Input shape!"));
         if (check_bound) {
-          return ir::Select::Make(selected, A(tmp_indexs), initial);
+          return ir::Select::Make(selected, A(tmp_indices), initial);
         } else {
-          return A(tmp_indexs);
+          return A(tmp_indices);
         }
       },
       UniqName(output_name + "_reshape"));
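
Every hunk in this file renames the same recurring pattern: keep the leading non-reduced output indices, pad zeros for the reduced dimensions, then linearize against the input shape. A simplified stand-in, with plain `int` instead of `ir::Expr` and a naive row-major offset in place of `cinn::common::IndiceToAbsOffset` (both helper names here are hypothetical):

#include <cassert>
#include <cstddef>
#include <vector>

// Row-major linearization, a simplified stand-in for IndiceToAbsOffset.
int AbsOffset(const std::vector<int>& shape, const std::vector<int>& indices) {
  assert(shape.size() == indices.size() && "indices size must match shape");
  int offset = 0;
  for (std::size_t i = 0; i < shape.size(); ++i) {
    offset = offset * shape[i] + indices[i];
  }
  return offset;
}

// Keep the first `keep` output indices and pad zeros up to the input rank,
// mirroring the tmp_indices construction in the reduce kernels above.
std::vector<int> PadReducedDims(const std::vector<int>& out_indices,
                                std::size_t keep,
                                std::size_t input_rank) {
  std::vector<int> tmp(out_indices.begin(), out_indices.begin() + keep);
  tmp.resize(input_rank, 0);  // reduced dims always read from index 0
  return tmp;
}
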
16 changes: 8 additions & 8 deletions paddle/cinn/hlir/pe/transform.cc
@@ -1200,12 +1200,12 @@ ir::Tensor Reverse(const ir::Tensor& input,
   std::vector<Expr> shape = input->shape;
   return lang::Compute(
       input->shape,
-      [=](const std::vector<Expr>& indice) {
-        std::vector<Expr> indexs(indice.begin(), indice.end());
+      [=](const std::vector<Expr>& indices) {
+        std::vector<Expr> out_indices(indices.begin(), indices.end());
         for (auto idx : axis) {
-          indexs[idx] = shape[idx] - Expr(1) - indexs[idx];
+          out_indices[idx] = shape[idx] - Expr(1) - out_indices[idx];
         }
-        return input(indexs);
+        return input(out_indices);
       },
       output_name);
 }
@@ -1256,12 +1256,12 @@ ir::Tensor Transpose(const ir::Tensor& input,

   return lang::Compute(
       output_shape,
-      [=](const std::vector<Expr>& indice) {
-        std::vector<Expr> indexs;
+      [=](const std::vector<Expr>& indices) {
+        std::vector<Expr> out_indices;
         for (auto idx : new_axis) {
-          indexs.push_back(indice[idx]);
+          out_indices.push_back(indices[idx]);
         }
-        return input(indexs);
+        return input(out_indices);
       },
       output_name);
 }
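
The two renamed lambdas implement simple index rewrites: Reverse mirrors the index along each reversed axis, and Transpose permutes the index by `new_axis`. A plain-integer sketch of both (hypothetical free functions, `int` in place of `ir::Expr`):

#include <vector>

// Reverse: flip the index along each axis listed in `axis`.
std::vector<int> ReverseIndex(std::vector<int> indices,
                              const std::vector<int>& shape,
                              const std::vector<int>& axis) {
  for (int a : axis) {
    indices[a] = shape[a] - 1 - indices[a];
  }
  return indices;
}

// Transpose: output axis i reads input axis new_axis[i].
std::vector<int> TransposeIndex(const std::vector<int>& indices,
                                const std::vector<int>& new_axis) {
  std::vector<int> out;
  out.reserve(new_axis.size());
  for (int a : new_axis) {
    out.push_back(indices[a]);
  }
  return out;
}
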
4 changes: 2 additions & 2 deletions paddle/cinn/hlir/pe/transform.h
@@ -233,7 +233,7 @@ ir::Tensor Gather(const ir::Tensor& x,
  * @brief Perform meta op ScatterAssign
  * @param input The input tensor
  * @param assign The assign tensor
- * @param indexs The indexs tensor
+ * @param index The index tensor
  * @param output_name the name of the output tensor
  */
 ir::Tensor ScatterAssign(
@@ -248,7 +248,7 @@ ir::Tensor ScatterAssign(
  * @brief Perform meta op ScatterAdd
  * @param input The input tensor
  * @param updates The updates tensor
- * @param indexs The indexs tensor
+ * @param index The index tensor
  * @param output_name the name of the output tensor
  */
 ir::Tensor ScatterAdd(const ir::Tensor& input,
6 changes: 3 additions & 3 deletions paddle/fluid/pybind/slice_utils.h
@@ -354,12 +354,12 @@ static void ParseIndex(const paddle::Tensor& tensor,
   }

   // valid_index is the number of dimensions exclude None index
-  const int valid_indexs = size - none_axes->size() - ell_count;
-  PADDLE_ENFORCE_EQ(valid_indexs <= rank,
+  const int valid_indices = size - none_axes->size() - ell_count;
+  PADDLE_ENFORCE_EQ(valid_indices <= rank,
                     true,
                     common::errors::InvalidArgument(
                         "Too many indices (%d) for tensor of dimension %d.",
-                        valid_indexs,
+                        valid_indices,
                         rank));
 }
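
The renamed check counts only the index items that consume a tensor dimension: `None` indices insert new axes and an ellipsis is a placeholder, so both are excluded before comparing against the rank. A hedged sketch of the same arithmetic (hypothetical standalone function):

#include <stdexcept>

// size: total index items; none_count: None indices;
// ell_count: ellipsis items; rank: tensor dimensionality.
void CheckIndexCount(int size, int none_count, int ell_count, int rank) {
  const int valid_indices = size - none_count - ell_count;
  if (valid_indices > rank) {
    throw std::invalid_argument("Too many indices for tensor rank");
  }
}

// Example: t[None, :, 0, ...] on a 2-D tensor has size 4, one None and
// one ellipsis, so valid_indices = 2 <= rank = 2 and the check passes.
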