[Tosa] Rename variables to coding style guideline (#69509)
This patch renames variables to follow the coding style guideline. Specifically, names
of the form xyz_abc are changed to the form xyzAbc.

Signed-off-by: Tai Ly <[email protected]>
Tai78641 authored Oct 18, 2023
1 parent 362b115 commit 3745e70
Showing 3 changed files with 81 additions and 82 deletions.
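The pattern is mechanical throughout the three files below (e.g. pad_op becomes padOp, tosa_level_t becomes TosaLevel). As a minimal stand-alone sketch of the convention — hypothetical names, not code taken from the patch — LLVM/MLIR style prefers camelCase for variable names over snake_case:

// Hypothetical illustration of the rename pattern; compiles on its own.
#include <cstdint>

int32_t kernelArea(int32_t kernelH, int32_t kernelW) {
  // Old style, removed by patches like this one:
  //   int32_t kernel_area = kernel_h * kernel_w;
  int32_t area = kernelH * kernelW; // camelCase locals per the guideline
  return area;
}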
14 changes: 7 additions & 7 deletions mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp
@@ -54,24 +54,24 @@ LogicalResult reshapeLowerToHigher(PatternRewriter &rewriter, Location loc,
     return rewriter.notifyMatchFailure(loc,
                                        "cannot rewrite as its already correct");
 
-  Value input1_copy = input1;
-  Value input2_copy = input2;
-  if (EqualizeRanks(rewriter, loc, input1_copy, input2_copy).failed()) {
+  Value input1Copy = input1;
+  Value input2Copy = input2;
+  if (EqualizeRanks(rewriter, loc, input1Copy, input2Copy).failed()) {
     return rewriter.notifyMatchFailure(loc, "failed to reshape inputs");
   }
 
   // Verify the rank agrees with the output type if the output type is ranked.
   if (outputType) {
     if (outputType.getRank() !=
-            llvm::cast<RankedTensorType>(input1_copy.getType()).getRank() ||
+            llvm::cast<RankedTensorType>(input1Copy.getType()).getRank() ||
         outputType.getRank() !=
-            llvm::cast<RankedTensorType>(input2_copy.getType()).getRank())
+            llvm::cast<RankedTensorType>(input2Copy.getType()).getRank())
       return rewriter.notifyMatchFailure(
           loc, "the reshaped type doesn't agrees with the ranked output type");
   }
 
-  input1 = input1_copy;
-  input2 = input2_copy;
+  input1 = input1Copy;
+  input2 = input2Copy;
 
   return success();
 }
141 changes: 70 additions & 71 deletions mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -39,58 +39,58 @@ using namespace mlir::tosa;
 namespace {
 
 static LogicalResult checkConstantOperandPad(Operation *op) {
-  if (auto pad_op = dyn_cast<tosa::PadOp>(op)) {
+  if (auto padOp = dyn_cast<tosa::PadOp>(op)) {
     DenseElementsAttr paddings;
-    if (!matchPattern(pad_op.getPadding(), m_Constant(&paddings)))
+    if (!matchPattern(padOp.getPadding(), m_Constant(&paddings)))
       return op->emitOpError("padding of pad is not constant");
 
-    DenseElementsAttr pad_const;
-    // Assume this op is zero-padding if pad_const is not presented.
-    if (pad_op.getPadConst() &&
-        !matchPattern(pad_op.getPadConst(), m_Constant(&pad_const)))
+    DenseElementsAttr padConst;
+    // Assume this op is zero-padding if padConst is not presented.
+    if (padOp.getPadConst() &&
+        !matchPattern(padOp.getPadConst(), m_Constant(&padConst)))
       return op->emitOpError("pad_const of pad is not constant");
   }
   return success();
 }
 
 static LogicalResult checkConstantOperandTranspose(Operation *op) {
-  if (auto transpose_op = dyn_cast<tosa::TransposeOp>(op)) {
+  if (auto transposeOp = dyn_cast<tosa::TransposeOp>(op)) {
     DenseElementsAttr perms;
-    if (!matchPattern(transpose_op.getPerms(), m_Constant(&perms)))
+    if (!matchPattern(transposeOp.getPerms(), m_Constant(&perms)))
       return op->emitOpError("perms of transpose is not constant");
   }
   return success();
 }
 
 static LogicalResult checkConstantOperandFullyConnected(Operation *op) {
-  if (auto fc_op = dyn_cast<tosa::FullyConnectedOp>(op)) {
+  if (auto fcOp = dyn_cast<tosa::FullyConnectedOp>(op)) {
     DenseElementsAttr weight;
-    if (!matchPattern(fc_op.getWeight(), m_Constant(&weight)))
+    if (!matchPattern(fcOp.getWeight(), m_Constant(&weight)))
       return op->emitOpError("weight of fully_connected is not constant");
 
     DenseElementsAttr bias;
-    if (!matchPattern(fc_op.getBias(), m_Constant(&bias)))
+    if (!matchPattern(fcOp.getBias(), m_Constant(&bias)))
       return op->emitOpError("bias of fully_connected is not constant");
   }
   return success();
 }
 
-struct tosa_level_t {
+struct TosaLevel {
   int32_t MAX_RANK = 0;
   int32_t MAX_KERNEL = 0;
   int32_t MAX_STRIDE = 0;
   int32_t MAX_SCALE = 0;
 
   // @todo: MAX_LOG2_SIZE value and checks
 
-  bool operator==(const tosa_level_t &rhs) {
+  bool operator==(const TosaLevel &rhs) {
     return MAX_RANK == rhs.MAX_RANK && MAX_KERNEL == rhs.MAX_KERNEL &&
            MAX_STRIDE == rhs.MAX_STRIDE && MAX_SCALE == rhs.MAX_SCALE;
   }
 };
 
-static constexpr tosa_level_t TOSA_LEVEL_EIGHTK = {6, 8192, 8192, 256};
-static constexpr tosa_level_t TOSA_LEVEL_NONE = {0, 0, 0, 0};
+static constexpr TosaLevel TOSA_LEVEL_EIGHTK = {6, 8192, 8192, 256};
+static constexpr TosaLevel TOSA_LEVEL_NONE = {0, 0, 0, 0};
 
 //===----------------------------------------------------------------------===//
 // TOSA Validation Pass.
@@ -108,7 +108,7 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   void runOnOperation() final;
 
   LogicalResult applyConstantOperandCheck(Operation *op) {
-    for (auto &checker : const_checkers) {
+    for (auto &checker : constCheckers) {
       if (failed(checker(op)))
         return failure();
     }
@@ -122,43 +122,42 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
 
 private:
   void populateConstantOperandChecks() {
-    const_checkers.emplace_back(checkConstantOperandPad);
-    const_checkers.emplace_back(checkConstantOperandTranspose);
-    const_checkers.emplace_back(checkConstantOperandFullyConnected);
+    constCheckers.emplace_back(checkConstantOperandPad);
+    constCheckers.emplace_back(checkConstantOperandTranspose);
+    constCheckers.emplace_back(checkConstantOperandFullyConnected);
   }
 
   bool levelCheckKernel(Operation *op, int32_t v,
-                        const std::string &check_desc) {
-    if (v > tosa_level.MAX_KERNEL) {
-      op->emitOpError() << "failed level check: " << check_desc;
+                        const std::string &checkDesc) {
+    if (v > tosaLevel.MAX_KERNEL) {
+      op->emitOpError() << "failed level check: " << checkDesc;
       return false;
     }
     return true;
   }
 
   bool levelCheckStride(Operation *op, int32_t v,
-                        const std::string &check_desc) {
-    if (v > tosa_level.MAX_STRIDE) {
-      op->emitOpError() << "failed level check: " << check_desc;
+                        const std::string &checkDesc) {
+    if (v > tosaLevel.MAX_STRIDE) {
+      op->emitOpError() << "failed level check: " << checkDesc;
       return false;
     }
     return true;
   }
 
-  bool levelCheckScale(Operation *op, int32_t v,
-                       const std::string &check_desc) {
-    if (v > tosa_level.MAX_SCALE) {
-      op->emitOpError() << "failed level check: " << check_desc;
+  bool levelCheckScale(Operation *op, int32_t v, const std::string &checkDesc) {
+    if (v > tosaLevel.MAX_SCALE) {
+      op->emitOpError() << "failed level check: " << checkDesc;
       return false;
     }
     return true;
   }
 
   bool levelCheckRank(Operation *op, const Value &v,
-                      const std::string &check_desc) {
+                      const std::string &checkDesc) {
     if (ShapedType type = dyn_cast<ShapedType>(v.getType())) {
-      if (type.getRank() > tosa_level.MAX_RANK) {
-        op->emitOpError() << "failed level check: " << check_desc;
+      if (type.getRank() > tosaLevel.MAX_RANK) {
+        op->emitOpError() << "failed level check: " << checkDesc;
         return false;
       }
     }
@@ -182,8 +181,8 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   }
 
   bool levelCheckRanks(Operation *op) {
-#define CHECK_RANKS_FOR(tosa_op)                                               \
-  if (!levelCheckRanksFor<tosa_op##Op>(op))                                    \
+#define CHECK_RANKS_FOR(tosaOp)                                                \
+  if (!levelCheckRanksFor<tosaOp##Op>(op))                                     \
     return false;
 
     // tensor operators:
@@ -257,18 +256,18 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // Pool Op: level check kernel/stride/pad values
   template <typename T>
   bool levelCheckPool(Operation *op) {
-    if (auto pool_op = dyn_cast<T>(op)) {
-      for (auto k : pool_op.getKernel()) {
+    if (auto poolOp = dyn_cast<T>(op)) {
+      for (auto k : poolOp.getKernel()) {
         if (!levelCheckKernel(op, k, "kernel <= MAX_KERNEL")) {
           return false;
         }
       }
-      for (auto s : pool_op.getStride()) {
+      for (auto s : poolOp.getStride()) {
         if (!levelCheckStride(op, s, "stride <= MAX_STRIDE")) {
           return false;
         }
       }
-      for (auto p : pool_op.getPad()) {
+      for (auto p : poolOp.getPad()) {
         if (!levelCheckKernel(op, p, "pad <= MAX_KERNEL")) {
           return false;
         }
@@ -280,27 +279,27 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // Conv Op: level check dilation/stride/pad values
   template <typename T>
   bool levelCheckConv(Operation *op) {
-    if (auto conv_op = dyn_cast<T>(op)) {
+    if (auto convOp = dyn_cast<T>(op)) {
 
-      for (auto k : conv_op.getDilation()) {
+      for (auto k : convOp.getDilation()) {
         if (!levelCheckKernel(op, k, "dilation <= MAX_KERNEL")) {
           return false;
         }
       }
-      for (auto p : conv_op.getPad()) {
+      for (auto p : convOp.getPad()) {
         if (!levelCheckKernel(op, p, "pad <= MAX_KERNEL")) {
           return false;
        }
      }
-      for (auto s : conv_op.getStride()) {
+      for (auto s : convOp.getStride()) {
        if (!levelCheckStride(op, s, "stride <= MAX_STRIDE")) {
          return false;
        }
      }
-      auto dilation = conv_op.getDilation();
-      if (ShapedType weight_type =
+      auto dilation = convOp.getDilation();
+      if (ShapedType weightType =
              dyn_cast<ShapedType>(op->getOperand(1).getType())) {
-        auto shape = weight_type.getShape();
+        auto shape = weightType.getShape();
        if (isa<tosa::Conv2DOp>(op)) {
          assert(shape.size() == 4);
          assert(dilation.size() == 2);
@@ -354,9 +353,9 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // TransposeConv2d op: level check kH/kW, outpad, and stride
   bool levelCheckTransposeConv2d(Operation *op) {
     if (auto transpose = dyn_cast<tosa::TransposeConv2DOp>(op)) {
-      if (ShapedType filter_type =
+      if (ShapedType filterType =
              transpose.getFilter().getType().dyn_cast<ShapedType>()) {
-        auto shape = filter_type.getShape();
+        auto shape = filterType.getShape();
        assert(shape.size() == 4);
        // level check kernel sizes for kH and KW
        if (!levelCheckKernel(op, shape[1], "KH <= MAX_KERNEL") ||
@@ -382,13 +381,13 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   bool levelCheckResize(Operation *op) {
     if (auto resize = dyn_cast<tosa::ResizeOp>(op)) {
       auto scale = resize.getScale();
-      int16_t scale_y_n = scale[0];
-      int16_t scale_y_d = scale[1];
-      int16_t scale_x_n = scale[2];
-      int16_t scale_x_d = scale[3];
-      if (!levelCheckScale(op, scale_y_n / scale_y_d,
+      int16_t scaleYN = scale[0];
+      int16_t scaleYD = scale[1];
+      int16_t scaleXN = scale[2];
+      int16_t scaleXD = scale[3];
+      if (!levelCheckScale(op, scaleYN / scaleYD,
                            "scale_y_n/scale_y_d <= MAX_SCALE") ||
-          !levelCheckScale(op, scale_x_n / scale_x_d,
+          !levelCheckScale(op, scaleXN / scaleXD,
                            "scale_x_n/scale_x_d <= MAX_SCALE")) {
         return false;
       }
@@ -399,22 +398,22 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // configure profile and level values from pass options profileName and
   // levelName
   void configLevelAndProfile() {
-    tosa_level = TOSA_LEVEL_NONE;
+    tosaLevel = TOSA_LEVEL_NONE;
     if (level == TosaLevelEnum::EightK) {
-      tosa_level = TOSA_LEVEL_EIGHTK;
+      tosaLevel = TOSA_LEVEL_EIGHTK;
     }
   }
 
   bool CheckVariable(Operation *op);
   bool CheckVariableReadOrWrite(Operation *op);
 
-  SmallVector<std::function<LogicalResult(Operation *)>> const_checkers;
-  tosa_level_t tosa_level;
-  DenseMap<StringAttr, mlir::Type> variables_map;
+  SmallVector<std::function<LogicalResult(Operation *)>> constCheckers;
+  TosaLevel tosaLevel;
+  DenseMap<StringAttr, mlir::Type> variablesMap;
 };
 
 LogicalResult TosaValidation::applyLevelCheck(Operation *op) {
-  if (tosa_level == TOSA_LEVEL_NONE) {
+  if (tosaLevel == TOSA_LEVEL_NONE) {
     // no need to do level checks
     return success();
   }
@@ -439,24 +438,24 @@ LogicalResult TosaValidation::applyLevelCheck(Operation *op) {
 }
 
 inline bool CompatibleTypes(const mlir::Type &type,
-                            const mlir::Type &declared_type) {
+                            const mlir::Type &declaredType) {
   // for now, simply use type equality comparison
-  return type == declared_type;
+  return type == declaredType;
 }
 
 bool TosaValidation::CheckVariable(Operation *op) {
   if (isa<mlir::tosa::VariableOp>(op)) {
-    auto name_attr = cast<mlir::StringAttr>(op->getAttr("name"));
+    auto nameAttr = cast<mlir::StringAttr>(op->getAttr("name"));
 
-    if (variables_map.count(name_attr)) {
+    if (variablesMap.count(nameAttr)) {
       op->emitOpError() << "name has already been declared";
       return false;
     }
 
-    auto type_attr = cast<mlir::TypeAttr>(op->getAttr("type"));
-    mlir::Type type = type_attr.getValue();
+    auto typeAttr = cast<mlir::TypeAttr>(op->getAttr("type"));
+    mlir::Type type = typeAttr.getValue();
 
-    variables_map[name_attr] = type;
+    variablesMap[nameAttr] = type;
   }
 
   return true;
@@ -465,26 +464,26 @@ bool TosaValidation::CheckVariable(Operation *op) {
 bool TosaValidation::CheckVariableReadOrWrite(Operation *op) {
   if (isa<mlir::tosa::VariableReadOp>(op) ||
       isa<mlir::tosa::VariableWriteOp>(op)) {
-    auto name_attr = cast<mlir::StringAttr>(op->getAttr("name"));
+    auto nameAttr = cast<mlir::StringAttr>(op->getAttr("name"));
 
-    if (!variables_map.count(name_attr)) {
+    if (!variablesMap.count(nameAttr)) {
       op->emitOpError() << "name has not been declared";
       return false;
     }
 
-    auto var_type = variables_map[name_attr];
+    auto varType = variablesMap[nameAttr];
 
     for (auto v : op->getOperands()) {
       auto type = v.getType();
-      if (!CompatibleTypes(type, var_type)) {
+      if (!CompatibleTypes(type, varType)) {
         op->emitOpError() << "operand type does not equal variable type";
         return false;
       }
     }
 
     for (auto v : op->getResults()) {
       auto type = v.getType();
-      if (!CompatibleTypes(type, var_type)) {
+      if (!CompatibleTypes(type, varType)) {
         op->emitOpError() << "result type does not equal variable type";
         return false;
       }
8 changes: 4 additions & 4 deletions mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp
@@ -107,10 +107,10 @@ void mlir::tosa::computeMultiplierAndShift(double scale, int32_t &multiplier,
   }
 }
 
-#define GET_UQTYPE(input_type)                                                 \
-  (llvm::dyn_cast<quant::UniformQuantizedType>((input_type).getElementType()))
-#define GET_QTYPE(input_type)                                                  \
-  (llvm::dyn_cast<quant::QuantizedType>((input_type).getElementType()))
+#define GET_UQTYPE(inputType)                                                  \
+  (llvm::dyn_cast<quant::UniformQuantizedType>((inputType).getElementType()))
+#define GET_QTYPE(inputType)                                                   \
+  (llvm::dyn_cast<quant::QuantizedType>((inputType).getElementType()))
 
 /// Method to build ConvOpQuantizationAttr, called from
 /// ConvOpQuantInfoBuilder/TransConvOpQuantInfoBuilder:
