Skip to content

Commit

Permalink
Guard tuner attributes with flag
Browse files Browse the repository at this point in the history
Signed-off-by: nithinsubbiah <[email protected]>
  • Loading branch information
nithinsubbiah committed Dec 4, 2024
1 parent 53c15ff commit 9d91a9d
Show file tree
Hide file tree
Showing 5 changed files with 23 additions and 25 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -60,14 +60,3 @@ func.func @matmul_128x1024x256_1(%lhs : tensor<128x256xf32>, %rhs: tensor<256x10

// CHECK-LABEL: func.func @matmul_128x1024x256_1
// CHECK-NOT: iree_codegen.lowering_config

// -----

// Test: a matmul pre-annotated with both a lowering_config and the `root_op`
// unit attribute. The CHECK lines below only require that `root_op` survives
// on the linalg.matmul after the pass runs.
// NOTE(review): the top-level #config attribute is declared but not referenced
// by the op (the matmul inlines its own lowering_config) — presumably kept for
// symmetry with sibling test cases; confirm before removing.
#config = #iree_codegen.lowering_config<tile_sizes = [[128, 256], [16, 16]]>
func.func @matmul_128x1024x256_1(%lhs : tensor<128x256xf32>, %rhs: tensor<256x1024xf32>, %init: tensor<128x1024xf32>) -> tensor<128x1024xf32> {
%result = linalg.matmul {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[128, 256], [16, 16]]>, root_op} ins(%lhs, %rhs : tensor<128x256xf32>, tensor<256x1024xf32>) outs(%init : tensor<128x1024xf32>) -> tensor<128x1024xf32>
return %result : tensor<128x1024xf32>
}

// CHECK-LABEL: func.func @matmul_128x1024x256_1
// CHECK: linalg.matmul {root_op}
Original file line number Diff line number Diff line change
Expand Up @@ -570,8 +570,7 @@ void eraseCompilationInfo(Operation *op) {
}

//===----------------------------------------------------------------------===//
// Helpers for setting `iree_codegen.root_op` attribute on root operations for
// tuner.
// Helpers for setting attributes for tuner.
//===----------------------------------------------------------------------===//

void setRootOpInfo(Operation *op) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@

#include "iree/compiler/Codegen/Dialect/Codegen/IR/IREECodegenInterfaces.h"
#include "iree/compiler/Dialect/HAL/IR/HALOps.h"
#include "llvm/Support/CommandLine.h"
#include "mlir/Dialect/SCF/IR/DeviceMappingInterface.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
Expand All @@ -25,6 +26,11 @@ using TileSizesListTypeRef = ArrayRef<SmallVector<int64_t>>;
/// Typedef for scalable tile flags at different levels of tiling.
using ScalableTileFlagsListType = SmallVector<SmallVector<bool>>;
using ScalableTileFlagsListTypeRef = ArrayRef<SmallVector<bool>>;
/// Command-line flag (`--iree-config-add-tuner-attributes`, default off) that
/// guards attaching tuner-only attributes during configuration — in this
/// commit it gates the `setRootOpInfo` call that marks root ops for the tuner.
/// Declared as a C++17 `inline` variable so every translation unit including
/// this header shares a single flag instance (one cl::opt registration).
inline llvm::cl::opt<bool>
clSetTunerAttr("iree-config-add-tuner-attributes",
llvm::cl::desc("add attributes for tuner"),
llvm::cl::init(false));
} // namespace mlir::iree_compiler

// clang-format off
Expand Down Expand Up @@ -118,7 +124,9 @@ SmallVector<Value> getTileSizes(OpBuilder &b, Operation *op, unsigned level);
void setLoweringConfig(Operation *op, Attribute config);

/// Sets an attribute to identify the rootOp and adds any information needed for
/// the tuner from compiler. Currently, only sets a `UnitAttr`.
/// the tuner from compiler. Currently, only sets a `UnitAttr`. Note that this
/// attribute is not used by the compiler at any level and is only intended for
/// tuner use.
void setRootOpInfo(Operation *op);

/// Convenience function that sets the lowering configuration on the operation
Expand All @@ -127,7 +135,9 @@ inline LogicalResult setOpConfigAndEntryPointFnTranslation(
mlir::FunctionOpInterface entryPointFn, Operation *op,
IREE::Codegen::LoweringConfigAttrInterface config,
IREE::Codegen::TranslationInfoAttr translationInfo) {
setRootOpInfo(op);
if (clSetTunerAttr) {
setRootOpInfo(op);
}
if (config) {
setLoweringConfig(op, config);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1638,7 +1638,7 @@ func.func @pad_only() attributes {hal.executable.target = #executable_target_emb
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK: tensor.pad {{.+}} {
// CHECK: tensor.yield
// CHECK-NEXT: } {lowering_config = #[[CONFIG]], root_op}
// CHECK-NEXT: } {lowering_config = #[[CONFIG]]}

// -----

Expand All @@ -1665,7 +1665,7 @@ func.func @winograd_output_transform() attributes {hal.executable.target = #exec
// CHECK: func.func @winograd_output_transform()
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK: iree_linalg_ext.winograd.output_transform
// CHECK-SAME: {lowering_config = #[[CONFIG]], root_op}
// CHECK-SAME: {lowering_config = #[[CONFIG]]}

// -----

Expand All @@ -1692,7 +1692,7 @@ func.func @winograd_input_transform() attributes {hal.executable.target = #execu
// CHECK: func.func @winograd_input_transform()
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK: iree_linalg_ext.winograd.input_transform
// CHECK-SAME: {lowering_config = #[[CONFIG]], root_op}
// CHECK-SAME: {lowering_config = #[[CONFIG]]}

// -----

Expand All @@ -1719,7 +1719,7 @@ func.func @winograd_filter_transform() attributes {hal.executable.target = #exec
// CHECK: func.func @winograd_filter_transform()
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK: iree_linalg_ext.winograd.filter_transform
// CHECK-SAME: {lowering_config = #[[CONFIG]], root_op}
// CHECK-SAME: {lowering_config = #[[CONFIG]]}

// -----

Expand Down Expand Up @@ -1762,7 +1762,7 @@ func.func @attention() attributes {hal.executable.target = #executable_target_em
// CHECK: func.func @attention()
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK: iree_linalg_ext.attention
// CHECK-SAME: lowering_config = #[[CONFIG]], root_op
// CHECK-SAME: lowering_config = #[[CONFIG]]

// -----

Expand Down Expand Up @@ -1801,7 +1801,7 @@ func.func @elementwise_output_transposed() attributes {hal.executable.target = #
// CHECK: func.func @elementwise_output_transposed()
// CHECK-SAME: translation_info = #[[TRANSLATION]]
// CHECK: linalg.generic
// CHECK-SAME: {lowering_config = #[[CONFIG]], root_op}
// CHECK-SAME: {lowering_config = #[[CONFIG]]}

// -----

Expand Down Expand Up @@ -1886,7 +1886,7 @@ func.func @custom_op(%arg0 : tensor<384x512xf32>, %arg1 : tensor<512x128xf32>,
// CHECK: func @custom_op(
// CHECK-SAME: translation_info = #translation
// CHECK: iree_linalg_ext.custom_op
// CHECK-SAME: attributes {lowering_config = #[[CONFIG0]], root_op}
// CHECK-SAME: attributes {lowering_config = #[[CONFIG0]]}
// CHECK: ^bb
// CHECK: linalg.fill
// CHECK-SAME: {lowering_config = #[[CONFIG1]]}
Expand Down Expand Up @@ -1957,7 +1957,7 @@ func.func @test_tiling_cpu_default(%arg0: tensor<256x256xi8>, %arg1: tensor<256x
// CHECK-DAG: #[[TRANSLATION_INFO]] = #iree_codegen.translation_info<pipeline = CPUDefault>
// CHECK: func @test_tiling_cpu_default(
// CHECK-SAME: translation_info = #[[TRANSLATION_INFO]]
// CHECK: linalg.quantized_matmul {lowering_config = #[[CONFIG0]], root_op}
// CHECK: linalg.quantized_matmul {lowering_config = #[[CONFIG0]]}

// -----

Expand All @@ -1982,7 +1982,7 @@ func.func @i1_type() attributes {hal.executable.target = #executable_target_emb
// CHECK-DAG: #[[CONFIG:.+]] = #iree_codegen.lowering_config<tile_sizes = {{\[}}[8], [8], [0], [0]]>
// CHECK: func @i1_type()
// CHECK: linalg.generic {
// CHECK-SAME: {lowering_config = #[[CONFIG]], root_op}
// CHECK-SAME: {lowering_config = #[[CONFIG]]}

// -----
#pipeline_layout = #hal.pipeline.layout<bindings = [
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// RUN: iree-opt --split-input-file --iree-gpu-test-target=gfx942 --pass-pipeline='builtin.module(iree-llvmgpu-select-lowering-strategy)' %s | FileCheck %s
// RUN: iree-opt --split-input-file --iree-gpu-test-target=gfx942 --iree-config-add-tuner-attributes --pass-pipeline='builtin.module(iree-llvmgpu-select-lowering-strategy)' %s | FileCheck %s

func.func @matmul(%lhs: tensor<4x4xf32>, %rhs: tensor<4x4xf32>) -> tensor<4x4xf32> {
%c0 = arith.constant 0.0 : f32
Expand Down

0 comments on commit 9d91a9d

Please sign in to comment.