
[Tosa] Rename variables to coding style guideline #69509

Merged
merged 1 commit on Oct 18, 2023

Conversation

Tai78641
Contributor

This patch renames variables to follow the LLVM coding style guideline: names of the form xyz_abc become xyzAbc.

Signed-off-by: Tai Ly <[email protected]>
Change-Id: Ifceca302eb8c32b2153f7f6439cf64d81ac9ea4e
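
To make the convention concrete, here is a minimal, self-contained sketch of the rename pattern (the type and field names echo TosaValidation.cpp, but the snippet is illustrative only, not code from this patch):

#include <cstdint>
#include <iostream>

// Type names use CamelCase: the patch renames tosa_level_t to TosaLevel.
struct TosaLevel {
  int32_t MAX_RANK = 0;
  int32_t MAX_KERNEL = 0;
};

int main() {
  // Variable names use camelBack: tosa_level becomes tosaLevel,
  // check_desc becomes checkDesc, input1_copy becomes input1Copy, etc.
  TosaLevel tosaLevel{6, 8192};
  int32_t kernelSize = 4096; // hypothetical value, not from the patch
  std::cout << (kernelSize <= tosaLevel.MAX_KERNEL ? "kernel ok"
                                                   : "kernel too large")
            << "\n";
  return 0;
}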
@llvmbot
Member

llvmbot commented Oct 18, 2023

@llvm/pr-subscribers-mlir-tosa

@llvm/pr-subscribers-mlir

Author: Tai Ly (Tai78641)

Changes

This patch renames variables to follow the LLVM coding style guideline. Specifically, names of the form xyz_abc are changed to the form xyzAbc.

Change-Id: Ifceca302eb8c32b2153f7f6439cf64d81ac9ea4e


Full diff: https://github.com/llvm/llvm-project/pull/69509.diff

3 Files Affected:

  • (modified) mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp (+7-7)
  • (modified) mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp (+70-71)
  • (modified) mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp (+4-4)
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp
index 94cbb0afd274445..18bc7d6aa9ee6a8 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp
@@ -54,24 +54,24 @@ LogicalResult reshapeLowerToHigher(PatternRewriter &rewriter, Location loc,
     return rewriter.notifyMatchFailure(loc,
                                        "cannot rewrite as its already correct");
 
-  Value input1_copy = input1;
-  Value input2_copy = input2;
-  if (EqualizeRanks(rewriter, loc, input1_copy, input2_copy).failed()) {
+  Value input1Copy = input1;
+  Value input2Copy = input2;
+  if (EqualizeRanks(rewriter, loc, input1Copy, input2Copy).failed()) {
     return rewriter.notifyMatchFailure(loc, "failed to reshape inputs");
   }
 
   // Verify the rank agrees with the output type if the output type is ranked.
   if (outputType) {
     if (outputType.getRank() !=
-            llvm::cast<RankedTensorType>(input1_copy.getType()).getRank() ||
+            llvm::cast<RankedTensorType>(input1Copy.getType()).getRank() ||
         outputType.getRank() !=
-            llvm::cast<RankedTensorType>(input2_copy.getType()).getRank())
+            llvm::cast<RankedTensorType>(input2Copy.getType()).getRank())
       return rewriter.notifyMatchFailure(
           loc, "the reshaped type doesn't agrees with the ranked output type");
   }
 
-  input1 = input1_copy;
-  input2 = input2_copy;
+  input1 = input1Copy;
+  input2 = input2Copy;
 
   return success();
 }
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
index d973ac9cae2e842..8a2254fc24effe2 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -39,43 +39,43 @@ using namespace mlir::tosa;
 namespace {
 
 static LogicalResult checkConstantOperandPad(Operation *op) {
-  if (auto pad_op = dyn_cast<tosa::PadOp>(op)) {
+  if (auto padOp = dyn_cast<tosa::PadOp>(op)) {
     DenseElementsAttr paddings;
-    if (!matchPattern(pad_op.getPadding(), m_Constant(&paddings)))
+    if (!matchPattern(padOp.getPadding(), m_Constant(&paddings)))
       return op->emitOpError("padding of pad is not constant");
 
-    DenseElementsAttr pad_const;
-    // Assume this op is zero-padding if pad_const is not presented.
-    if (pad_op.getPadConst() &&
-        !matchPattern(pad_op.getPadConst(), m_Constant(&pad_const)))
+    DenseElementsAttr padConst;
+    // Assume this op is zero-padding if padConst is not presented.
+    if (padOp.getPadConst() &&
+        !matchPattern(padOp.getPadConst(), m_Constant(&padConst)))
       return op->emitOpError("pad_const of pad is not constant");
   }
   return success();
 }
 
 static LogicalResult checkConstantOperandTranspose(Operation *op) {
-  if (auto transpose_op = dyn_cast<tosa::TransposeOp>(op)) {
+  if (auto transposeOp = dyn_cast<tosa::TransposeOp>(op)) {
     DenseElementsAttr perms;
-    if (!matchPattern(transpose_op.getPerms(), m_Constant(&perms)))
+    if (!matchPattern(transposeOp.getPerms(), m_Constant(&perms)))
       return op->emitOpError("perms of transpose is not constant");
   }
   return success();
 }
 
 static LogicalResult checkConstantOperandFullyConnected(Operation *op) {
-  if (auto fc_op = dyn_cast<tosa::FullyConnectedOp>(op)) {
+  if (auto fcOp = dyn_cast<tosa::FullyConnectedOp>(op)) {
     DenseElementsAttr weight;
-    if (!matchPattern(fc_op.getWeight(), m_Constant(&weight)))
+    if (!matchPattern(fcOp.getWeight(), m_Constant(&weight)))
       return op->emitOpError("weight of fully_connected is not constant");
 
     DenseElementsAttr bias;
-    if (!matchPattern(fc_op.getBias(), m_Constant(&bias)))
+    if (!matchPattern(fcOp.getBias(), m_Constant(&bias)))
       return op->emitOpError("bias of fully_connected is not constant");
   }
   return success();
 }
 
-struct tosa_level_t {
+struct TosaLevel {
   int32_t MAX_RANK = 0;
   int32_t MAX_KERNEL = 0;
   int32_t MAX_STRIDE = 0;
@@ -83,14 +83,14 @@ struct tosa_level_t {
 
   // @todo: MAX_LOG2_SIZE value and checks
 
-  bool operator==(const tosa_level_t &rhs) {
+  bool operator==(const TosaLevel &rhs) {
     return MAX_RANK == rhs.MAX_RANK && MAX_KERNEL == rhs.MAX_KERNEL &&
            MAX_STRIDE == rhs.MAX_STRIDE && MAX_SCALE == rhs.MAX_SCALE;
   }
 };
 
-static constexpr tosa_level_t TOSA_LEVEL_EIGHTK = {6, 8192, 8192, 256};
-static constexpr tosa_level_t TOSA_LEVEL_NONE = {0, 0, 0, 0};
+static constexpr TosaLevel TOSA_LEVEL_EIGHTK = {6, 8192, 8192, 256};
+static constexpr TosaLevel TOSA_LEVEL_NONE = {0, 0, 0, 0};
 
 //===----------------------------------------------------------------------===//
 // TOSA Validation Pass.
@@ -108,7 +108,7 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   void runOnOperation() final;
 
   LogicalResult applyConstantOperandCheck(Operation *op) {
-    for (auto &checker : const_checkers) {
+    for (auto &checker : constCheckers) {
       if (failed(checker(op)))
         return failure();
     }
@@ -122,43 +122,42 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
 
 private:
   void populateConstantOperandChecks() {
-    const_checkers.emplace_back(checkConstantOperandPad);
-    const_checkers.emplace_back(checkConstantOperandTranspose);
-    const_checkers.emplace_back(checkConstantOperandFullyConnected);
+    constCheckers.emplace_back(checkConstantOperandPad);
+    constCheckers.emplace_back(checkConstantOperandTranspose);
+    constCheckers.emplace_back(checkConstantOperandFullyConnected);
   }
 
   bool levelCheckKernel(Operation *op, int32_t v,
-                        const std::string &check_desc) {
-    if (v > tosa_level.MAX_KERNEL) {
-      op->emitOpError() << "failed level check: " << check_desc;
+                        const std::string &checkDesc) {
+    if (v > tosaLevel.MAX_KERNEL) {
+      op->emitOpError() << "failed level check: " << checkDesc;
       return false;
     }
     return true;
   }
 
   bool levelCheckStride(Operation *op, int32_t v,
-                        const std::string &check_desc) {
-    if (v > tosa_level.MAX_STRIDE) {
-      op->emitOpError() << "failed level check: " << check_desc;
+                        const std::string &checkDesc) {
+    if (v > tosaLevel.MAX_STRIDE) {
+      op->emitOpError() << "failed level check: " << checkDesc;
       return false;
     }
     return true;
   }
 
-  bool levelCheckScale(Operation *op, int32_t v,
-                       const std::string &check_desc) {
-    if (v > tosa_level.MAX_SCALE) {
-      op->emitOpError() << "failed level check: " << check_desc;
+  bool levelCheckScale(Operation *op, int32_t v, const std::string &checkDesc) {
+    if (v > tosaLevel.MAX_SCALE) {
+      op->emitOpError() << "failed level check: " << checkDesc;
       return false;
     }
     return true;
   }
 
   bool levelCheckRank(Operation *op, const Value &v,
-                      const std::string &check_desc) {
+                      const std::string &checkDesc) {
     if (ShapedType type = dyn_cast<ShapedType>(v.getType())) {
-      if (type.getRank() > tosa_level.MAX_RANK) {
-        op->emitOpError() << "failed level check: " << check_desc;
+      if (type.getRank() > tosaLevel.MAX_RANK) {
+        op->emitOpError() << "failed level check: " << checkDesc;
         return false;
       }
     }
@@ -182,8 +181,8 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   }
 
   bool levelCheckRanks(Operation *op) {
-#define CHECK_RANKS_FOR(tosa_op)                                               \
-  if (!levelCheckRanksFor<tosa_op##Op>(op))                                    \
+#define CHECK_RANKS_FOR(tosaOp)                                                \
+  if (!levelCheckRanksFor<tosaOp##Op>(op))                                     \
     return false;
 
     // tensor operators:
@@ -257,18 +256,18 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // Pool Op: level check kernel/stride/pad values
   template <typename T>
   bool levelCheckPool(Operation *op) {
-    if (auto pool_op = dyn_cast<T>(op)) {
-      for (auto k : pool_op.getKernel()) {
+    if (auto poolOp = dyn_cast<T>(op)) {
+      for (auto k : poolOp.getKernel()) {
         if (!levelCheckKernel(op, k, "kernel <= MAX_KERNEL")) {
           return false;
         }
       }
-      for (auto s : pool_op.getStride()) {
+      for (auto s : poolOp.getStride()) {
         if (!levelCheckStride(op, s, "stride <= MAX_STRIDE")) {
           return false;
         }
       }
-      for (auto p : pool_op.getPad()) {
+      for (auto p : poolOp.getPad()) {
         if (!levelCheckKernel(op, p, "pad <= MAX_KERNEL")) {
           return false;
         }
@@ -280,27 +279,27 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // Conv Op: level check dilation/stride/pad values
   template <typename T>
   bool levelCheckConv(Operation *op) {
-    if (auto conv_op = dyn_cast<T>(op)) {
+    if (auto convOp = dyn_cast<T>(op)) {
 
-      for (auto k : conv_op.getDilation()) {
+      for (auto k : convOp.getDilation()) {
         if (!levelCheckKernel(op, k, "dilation <= MAX_KERNEL")) {
           return false;
         }
       }
-      for (auto p : conv_op.getPad()) {
+      for (auto p : convOp.getPad()) {
         if (!levelCheckKernel(op, p, "pad <= MAX_KERNEL")) {
           return false;
         }
       }
-      for (auto s : conv_op.getStride()) {
+      for (auto s : convOp.getStride()) {
         if (!levelCheckStride(op, s, "stride <= MAX_STRIDE")) {
           return false;
         }
       }
-      auto dilation = conv_op.getDilation();
-      if (ShapedType weight_type =
+      auto dilation = convOp.getDilation();
+      if (ShapedType weightType =
               dyn_cast<ShapedType>(op->getOperand(1).getType())) {
-        auto shape = weight_type.getShape();
+        auto shape = weightType.getShape();
         if (isa<tosa::Conv2DOp>(op)) {
           assert(shape.size() == 4);
           assert(dilation.size() == 2);
@@ -354,9 +353,9 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // TransposeConv2d op: level check kH/kW, outpad, and stride
   bool levelCheckTransposeConv2d(Operation *op) {
     if (auto transpose = dyn_cast<tosa::TransposeConv2DOp>(op)) {
-      if (ShapedType filter_type =
+      if (ShapedType filterType =
               transpose.getFilter().getType().dyn_cast<ShapedType>()) {
-        auto shape = filter_type.getShape();
+        auto shape = filterType.getShape();
         assert(shape.size() == 4);
         // level check kernel sizes for kH and KW
         if (!levelCheckKernel(op, shape[1], "KH <= MAX_KERNEL") ||
@@ -382,13 +381,13 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   bool levelCheckResize(Operation *op) {
     if (auto resize = dyn_cast<tosa::ResizeOp>(op)) {
       auto scale = resize.getScale();
-      int16_t scale_y_n = scale[0];
-      int16_t scale_y_d = scale[1];
-      int16_t scale_x_n = scale[2];
-      int16_t scale_x_d = scale[3];
-      if (!levelCheckScale(op, scale_y_n / scale_y_d,
+      int16_t scaleYN = scale[0];
+      int16_t scaleYD = scale[1];
+      int16_t scaleXN = scale[2];
+      int16_t scaleXD = scale[3];
+      if (!levelCheckScale(op, scaleYN / scaleYD,
                            "scale_y_n/scale_y_d <= MAX_SCALE") ||
-          !levelCheckScale(op, scale_x_n / scale_x_d,
+          !levelCheckScale(op, scaleXN / scaleXD,
                            "scale_x_n/scale_x_d <= MAX_SCALE")) {
         return false;
       }
@@ -399,22 +398,22 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
   // configure profile and level values from pass options profileName and
   // levelName
   void configLevelAndProfile() {
-    tosa_level = TOSA_LEVEL_NONE;
+    tosaLevel = TOSA_LEVEL_NONE;
     if (level == TosaLevelEnum::EightK) {
-      tosa_level = TOSA_LEVEL_EIGHTK;
+      tosaLevel = TOSA_LEVEL_EIGHTK;
     }
   }
 
   bool CheckVariable(Operation *op);
   bool CheckVariableReadOrWrite(Operation *op);
 
-  SmallVector<std::function<LogicalResult(Operation *)>> const_checkers;
-  tosa_level_t tosa_level;
-  DenseMap<StringAttr, mlir::Type> variables_map;
+  SmallVector<std::function<LogicalResult(Operation *)>> constCheckers;
+  TosaLevel tosaLevel;
+  DenseMap<StringAttr, mlir::Type> variablesMap;
 };
 
 LogicalResult TosaValidation::applyLevelCheck(Operation *op) {
-  if (tosa_level == TOSA_LEVEL_NONE) {
+  if (tosaLevel == TOSA_LEVEL_NONE) {
     // no need to do level checks
     return success();
   }
@@ -439,24 +438,24 @@ LogicalResult TosaValidation::applyLevelCheck(Operation *op) {
 }
 
 inline bool CompatibleTypes(const mlir::Type &type,
-                            const mlir::Type &declared_type) {
+                            const mlir::Type &declaredType) {
   // for now, simply use type equality comparison
-  return type == declared_type;
+  return type == declaredType;
 }
 
 bool TosaValidation::CheckVariable(Operation *op) {
   if (isa<mlir::tosa::VariableOp>(op)) {
-    auto name_attr = cast<mlir::StringAttr>(op->getAttr("name"));
+    auto nameAttr = cast<mlir::StringAttr>(op->getAttr("name"));
 
-    if (variables_map.count(name_attr)) {
+    if (variablesMap.count(nameAttr)) {
       op->emitOpError() << "name has already been declared";
       return false;
     }
 
-    auto type_attr = cast<mlir::TypeAttr>(op->getAttr("type"));
-    mlir::Type type = type_attr.getValue();
+    auto typeAttr = cast<mlir::TypeAttr>(op->getAttr("type"));
+    mlir::Type type = typeAttr.getValue();
 
-    variables_map[name_attr] = type;
+    variablesMap[nameAttr] = type;
   }
 
   return true;
@@ -465,18 +464,18 @@ bool TosaValidation::CheckVariable(Operation *op) {
 bool TosaValidation::CheckVariableReadOrWrite(Operation *op) {
   if (isa<mlir::tosa::VariableReadOp>(op) ||
       isa<mlir::tosa::VariableWriteOp>(op)) {
-    auto name_attr = cast<mlir::StringAttr>(op->getAttr("name"));
+    auto nameAttr = cast<mlir::StringAttr>(op->getAttr("name"));
 
-    if (!variables_map.count(name_attr)) {
+    if (!variablesMap.count(nameAttr)) {
       op->emitOpError() << "name has not been declared";
       return false;
     }
 
-    auto var_type = variables_map[name_attr];
+    auto varType = variablesMap[nameAttr];
 
     for (auto v : op->getOperands()) {
       auto type = v.getType();
-      if (!CompatibleTypes(type, var_type)) {
+      if (!CompatibleTypes(type, varType)) {
         op->emitOpError() << "operand type does not equal variable type";
         return false;
       }
@@ -484,7 +483,7 @@ bool TosaValidation::CheckVariableReadOrWrite(Operation *op) {
 
     for (auto v : op->getResults()) {
       auto type = v.getType();
-      if (!CompatibleTypes(type, var_type)) {
+      if (!CompatibleTypes(type, varType)) {
         op->emitOpError() << "result type does not equal variable type";
         return false;
       }
diff --git a/mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp b/mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp
index eb81446caacaba5..5c546f59cde4132 100644
--- a/mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp
+++ b/mlir/lib/Dialect/Tosa/Utils/QuantUtils.cpp
@@ -107,10 +107,10 @@ void mlir::tosa::computeMultiplierAndShift(double scale, int32_t &multiplier,
   }
 }
 
-#define GET_UQTYPE(input_type)                                                 \
-  (llvm::dyn_cast<quant::UniformQuantizedType>((input_type).getElementType()))
-#define GET_QTYPE(input_type)                                                  \
-  (llvm::dyn_cast<quant::QuantizedType>((input_type).getElementType()))
+#define GET_UQTYPE(inputType)                                                  \
+  (llvm::dyn_cast<quant::UniformQuantizedType>((inputType).getElementType()))
+#define GET_QTYPE(inputType)                                                   \
+  (llvm::dyn_cast<quant::QuantizedType>((inputType).getElementType()))
 
 /// Method to build ConvOpQuantizationAttr, called from
 /// ConvOpQuantInfoBuilder/TransConvOpQuantInfoBuilder:

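A detail that may not be obvious from the diff above: renaming the macro parameters (tosa_op to tosaOp in CHECK_RANKS_FOR, input_type to inputType in GET_UQTYPE/GET_QTYPE) cannot change behavior, since a macro parameter is substituted textually wherever it appears. A standalone sketch of the token-pasting case, using a stub in place of the real levelCheckRanksFor:

#include <iostream>

struct ConcatOp {}; // hypothetical stand-in for a tosa::*Op class

template <typename T>
bool levelCheckRanksFor() { return true; } // stub; the real one checks ranks

// Same shape as the patch's macro: the parameter only feeds the ## paste,
// so CHECK_RANKS_FOR(Concat) expands to levelCheckRanksFor<ConcatOp>()
// no matter what the parameter is named.
#define CHECK_RANKS_FOR(tosaOp)                                                \
  if (!levelCheckRanksFor<tosaOp##Op>())                                       \
    return 1;

int main() {
  CHECK_RANKS_FOR(Concat)
  std::cout << "rank checks passed\n";
  return 0;
}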
@joker-eph
Collaborator

joker-eph left a comment


Thanks!

@joker-eph
Collaborator

> Change-Id: Ifceca302eb8c32b2153f7f6439cf64d81ac9ea4e

I'm not sure what this is. Maybe something you can strip before merging?

@eric-k256
Contributor

> Change-Id: Ifceca302eb8c32b2153f7f6439cf64d81ac9ea4e
>
> I'm not sure what this is. Maybe something you can strip before merging?

I'll strip it with the merge.
