From 765bf2ec6b39e331c27c000a2734c2557e2936e6 Mon Sep 17 00:00:00 2001
From: Thomas Raoux
Date: Sun, 24 Nov 2024 20:35:34 -0800
Subject: [PATCH] tweak pipeline heuristic

Only consider the A and B operands of dot ops when collecting loads to
pipeline; a load that only feeds the accumulator (operand C) is no
longer pipelined.
---
 .../TritonGPU/Transforms/LoopScheduling.cpp |  5 ++
 test/TritonGPU/loop-schedule.mlir           | 61 +++++++++++++++++++
 2 files changed, 66 insertions(+)
 create mode 100644 test/TritonGPU/loop-schedule.mlir

diff --git a/lib/Dialect/TritonGPU/Transforms/LoopScheduling.cpp b/lib/Dialect/TritonGPU/Transforms/LoopScheduling.cpp
index 9d6d903f4d2c..fea8540e78d7 100644
--- a/lib/Dialect/TritonGPU/Transforms/LoopScheduling.cpp
+++ b/lib/Dialect/TritonGPU/Transforms/LoopScheduling.cpp
@@ -56,6 +56,11 @@ loadOpsToIndirectionLevelAndUse(scf::ForOp forOp) {
           distance++;
         }
         for (Value operand : op->getOperands()) {
+          if (op->hasTrait<OpTrait::DotLike>()) {
+            // Heuristic: only pipeline A and B operands of the dot op.
+            if (operand == op->getOperand(2))
+              continue;
+          }
           Value v = operand;
           Operation *defOp = v.getDefiningOp();
           if (defOp && defOp->getBlock() == op->getBlock()) {
diff --git a/test/TritonGPU/loop-schedule.mlir b/test/TritonGPU/loop-schedule.mlir
new file mode 100644
index 000000000000..a73f70d3e3b6
--- /dev/null
+++ b/test/TritonGPU/loop-schedule.mlir
@@ -0,0 +1,61 @@
+// RUN: triton-opt %s -split-input-file -tritongpu-loop-scheduling=num-stages=3 | FileCheck %s
+
+#AL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}>
+#BL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [1, 32], warpsPerCTA = [4, 1], order = [1, 0]}>
+#C = #triton_gpu.nvidia_mma<{versionMajor = 2, warpsPerCTA = [4, 1]}>
+#ALs0 = #triton_gpu.slice<{parent=#AL, dim=0}>
+#BLs0 = #triton_gpu.slice<{parent=#BL, dim=0}>
+#CLs0 = #triton_gpu.slice<{parent=#C, dim=0}>
+#A = #triton_gpu.dot_op<{opIdx = 0, parent = #C, kWidth=2}>
+#B = #triton_gpu.dot_op<{opIdx = 1, parent = #C, kWidth=2}>
+module attributes {"triton_gpu.num-warps" = 4 : i32, "triton_gpu.num-ctas" = 1 : i32} {
+// CHECK-LABEL: @matmul_loop_load_acc
+// CHECK: tt.load %{{.*}} {loop.cluster = 3 : i32, loop.stage = 0 : i32}
+// CHECK: tt.load %{{.*}} {loop.cluster = 3 : i32, loop.stage = 0 : i32}
+// CHECK: tt.load %{{.*}} {loop.cluster = 1 : i32, loop.stage = 2 : i32}
+// CHECK: tt.dot {{.*}} {loop.cluster = 1 : i32, loop.stage = 2 : i32}
+tt.func @matmul_loop_load_acc(%lb : index, %ub : index, %step : index,
+                    %A : !tt.ptr<f16> {tt.divisibility = 16 : i32},
+                    %B : !tt.ptr<f16> {tt.divisibility = 16 : i32},
+                    %C : !tt.ptr<f32> {tt.divisibility = 16 : i32},
+                    %c_init: tensor<128x128xf32, #C>) -> tensor<128x128xf32, #C> {
+
+  // A ptrs
+  %a_ptr_splat = tt.splat %A : !tt.ptr<f16> -> tensor<128x32x!tt.ptr<f16>, #AL>
+  %a_tmp0 = tt.make_range {end = 32: i32, start = 0: i32} : tensor<32xi32, #ALs0>
+  %a_tmp1 = tt.expand_dims %a_tmp0 {axis = 0 : i32} : tensor<32xi32, #ALs0> -> tensor<1x32xi32, #AL>
+  %a_offs = tt.broadcast %a_tmp1 : tensor<1x32xi32, #AL> -> tensor<128x32xi32, #AL>
+  %a_ptr_init = tt.addptr %a_ptr_splat, %a_offs : tensor<128x32x!tt.ptr<f16>, #AL>, tensor<128x32xi32, #AL>
+  // B ptrs
+  %b_ptr_splat = tt.splat %B : !tt.ptr<f16> -> tensor<32x128x!tt.ptr<f16>, #BL>
+  %b_tmp0 = tt.make_range {end = 128: i32, start = 0: i32} : tensor<128xi32, #BLs0>
+  %b_tmp1 = tt.expand_dims %b_tmp0 {axis = 0 : i32} : tensor<128xi32, #BLs0> -> tensor<1x128xi32, #BL>
+  %b_offs = tt.broadcast %b_tmp1 : tensor<1x128xi32, #BL> -> tensor<32x128xi32, #BL>
+  %b_ptr_init = tt.addptr %b_ptr_splat, %b_offs : tensor<32x128x!tt.ptr<f16>, #BL>, tensor<32x128xi32, #BL>
+  // C ptrs
+  %c_ptr_splat = tt.splat %C : !tt.ptr<f32> -> tensor<128x128x!tt.ptr<f32>, #C>
+  %c_tmp0 = tt.make_range {end = 128: i32, start = 0: i32} : tensor<128xi32, #CLs0>
+  %c_tmp1 = tt.expand_dims %c_tmp0 {axis = 0 : i32} : tensor<128xi32, #CLs0> -> tensor<1x128xi32, #C>
+  %c_offs = tt.broadcast %c_tmp1 : tensor<1x128xi32, #C> -> tensor<128x128xi32, #C>
+  %c_ptr_init = tt.addptr %c_ptr_splat, %c_offs : tensor<128x128x!tt.ptr<f32>, #C>, tensor<128x128xi32, #C>
+
+  %a_off = arith.constant dense<4> : tensor<128x32xi32, #AL>
+  %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL>
+  %c_off = arith.constant dense<4> : tensor<128x128xi32, #C>
+
+  %loop:4 = scf.for %iv = %lb to %ub step %step iter_args(%a_ptr = %a_ptr_init, %b_ptr = %b_ptr_init, %c_ptr = %c_ptr_init, %prev_c = %c_init) -> (tensor<128x32x!tt.ptr<f16>, #AL>, tensor<32x128x!tt.ptr<f16>, #BL>, tensor<128x128x!tt.ptr<f32>, #C>, tensor<128x128xf32, #C>) {
+    %a_ = tt.load %a_ptr : tensor<128x32x!tt.ptr<f16>, #AL>
+    %a = triton_gpu.convert_layout %a_ : tensor<128x32xf16, #AL> -> tensor<128x32xf16, #A>
+    %b_ = tt.load %b_ptr : tensor<32x128x!tt.ptr<f16>, #BL>
+    %b = triton_gpu.convert_layout %b_ : tensor<32x128xf16, #BL> -> tensor<32x128xf16, #B>
+    %c_ = tt.load %c_ptr : tensor<128x128x!tt.ptr<f32>, #C>
+    %c = tt.dot %a, %b, %c_ : tensor<128x32xf16, #A> * tensor<32x128xf16, #B> -> tensor<128x128xf32, #C>
+
+    %next_a_ptr = tt.addptr %a_ptr, %a_off : tensor<128x32x!tt.ptr<f16>, #AL>, tensor<128x32xi32, #AL>
+    %next_b_ptr = tt.addptr %b_ptr, %b_off : tensor<32x128x!tt.ptr<f16>, #BL>, tensor<32x128xi32, #BL>
+    %next_c_ptr = tt.addptr %c_ptr, %c_off : tensor<128x128x!tt.ptr<f32>, #C>, tensor<128x128xi32, #C>
+    scf.yield %next_a_ptr, %next_b_ptr, %next_c_ptr, %c : tensor<128x32x!tt.ptr<f16>, #AL>, tensor<32x128x!tt.ptr<f16>, #BL>, tensor<128x128x!tt.ptr<f32>, #C>, tensor<128x128xf32, #C>
+  }
+  tt.return %loop#3: tensor<128x128xf32, #C>
+}
+}
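
For context, a minimal standalone sketch of the check the C++ hunk adds. The
helper name `skipAccOperand` is illustrative and not part of the patch; it
assumes Triton's `DotLike` trait from triton/Dialect/Triton/IR/Traits.h:

    #include "mlir/IR/Operation.h"
    #include "triton/Dialect/Triton/IR/Traits.h"

    // Mirrors the hunk in loadOpsToIndirectionLevelAndUse: while walking an
    // op's operands to find pipelineable loads, skip a dot-like op's
    // accumulator (operand 2), so a load that only feeds the accumulator is
    // never collected as a pipelining candidate.
    static bool skipAccOperand(mlir::Operation *op, mlir::Value operand) {
      return op->hasTrait<mlir::OpTrait::DotLike>() &&
             operand == op->getOperand(2);
    }

In the patched loop, the DFS simply `continue`s when this predicate holds, so
only the A and B operand chains (operands 0 and 1) can pull loads into early
pipeline stages; the accumulator load stays in the same stage as the dot, as
the new test checks.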