diff --git a/compiler-rt/lib/hwasan/hwasan_report.cpp b/compiler-rt/lib/hwasan/hwasan_report.cpp index f74e26af70a0f..0590e231448c0 100644 --- a/compiler-rt/lib/hwasan/hwasan_report.cpp +++ b/compiler-rt/lib/hwasan/hwasan_report.cpp @@ -323,6 +323,21 @@ static uptr GetGlobalSizeFromDescriptor(uptr ptr) { void ReportStats() {} +constexpr uptr kDumpWidth = 16; +constexpr uptr kShadowLines = 17; +constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth; + +constexpr uptr kShortLines = 3; +constexpr uptr kShortDumpSize = kShortLines * kDumpWidth; +constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth; + +static uptr GetPrintTagStart(uptr addr) { + addr = MemToShadow(addr); + addr = RoundDownTo(addr, kDumpWidth); + addr -= kDumpWidth * (kShadowLines / 2); + return addr; +} + template <typename PrintTag> static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows, InternalScopedString &s, @@ -352,7 +367,7 @@ static void PrintTagsAroundAddr(uptr addr, GetTag get_tag, "Memory tags around the buggy address (one tag corresponds to %zd " "bytes):\n", kShadowAlignment); - PrintTagInfoAroundAddr(addr, 17, s, + PrintTagInfoAroundAddr(addr, kShadowLines, s, [&](InternalScopedString &s, uptr tag_addr) { tag_t tag = get_tag(tag_addr); s.AppendF("%02x", tag); @@ -362,7 +377,7 @@ "Tags for short granules around the buggy address (one tag corresponds " "to %zd bytes):\n", kShadowAlignment); - PrintTagInfoAroundAddr(addr, 3, s, + PrintTagInfoAroundAddr(addr, kShortLines, s, [&](InternalScopedString &s, uptr tag_addr) { tag_t tag = get_tag(tag_addr); if (tag >= 1 && tag <= kShadowAlignment) { @@ -439,8 +454,8 @@ class BaseReport { struct Shadow { uptr addr = 0; - tag_t tags[512] = {}; - tag_t short_tags[ARRAY_SIZE(tags)] = {}; + tag_t tags[kShadowDumpSize] = {}; + tag_t short_tags[kShortDumpSize] = {}; }; sptr FindMismatchOffset() const; @@ -508,16 +523,19 @@ BaseReport::Shadow BaseReport::CopyShadow() const { if (!MemIsApp(untagged_addr)) return result; - result.addr = MemToShadow(untagged_addr) - ARRAY_SIZE(result.tags) / 2; - for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i) { - uptr tag_addr = result.addr + i; + result.addr = GetPrintTagStart(untagged_addr + mismatch_offset); + uptr tag_addr = result.addr; + uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags); + for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) { if (!MemIsShadow(tag_addr)) continue; result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr); + if (i < kShortDumpOffset || i >= short_end) + continue; uptr granule_addr = ShadowToMem(tag_addr); if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment && IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) { - result.short_tags[i] = + result.short_tags[i - kShortDumpOffset] = *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1); } } @@ -532,8 +550,8 @@ tag_t BaseReport::GetTagCopy(uptr addr) const { } tag_t BaseReport::GetShortTagCopy(uptr addr) const { - CHECK_GE(addr, shadow.addr); - uptr idx = addr - shadow.addr; + CHECK_GE(addr, shadow.addr + kShortDumpOffset); + uptr idx = addr - shadow.addr - kShortDumpOffset; CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags)); return shadow.short_tags[idx]; } diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h index e7db9547f03b6..e8e61b73f9e0c 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -364,6 +364,8 @@
class MachineIRBuilder { State.Observer = &Observer; } + GISelChangeObserver *getObserver() { return State.Observer; } + void stopObservingChanges() { State.Observer = nullptr; } bool isObservingChanges() const { return State.Observer != nullptr; } diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index c46d78e460b32..c6a7aa17146dd 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -4147,6 +4147,12 @@ class TargetLowering : public TargetLoweringBase { return true; } + /// GlobalISel - return true if it's profitable to perform the combine: + /// shl ([sza]ext x), y => zext (shl x, y) + virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const { + return true; + } + // Return AndOrSETCCFoldKind::{AddAnd, ABS} if its desirable to try and // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of // writing this) is: diff --git a/llvm/include/llvm/Config/llvm-config.h.cmake b/llvm/include/llvm/Config/llvm-config.h.cmake index c9b731de28365..6fb6bebd0dc5b 100644 --- a/llvm/include/llvm/Config/llvm-config.h.cmake +++ b/llvm/include/llvm/Config/llvm-config.h.cmake @@ -16,7 +16,7 @@ /* Indicate that this is LLVM compiled from the amd-gfx branch. */ #define LLVM_HAVE_BRANCH_AMD_GFX -#define LLVM_MAIN_REVISION 475561 +#define LLVM_MAIN_REVISION 475568 /* Define if LLVM_ENABLE_DUMP is enabled */ #cmakedefine LLVM_ENABLE_DUMP diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp index 2ce6895042409..f79944e824575 100644 --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -1719,6 +1719,8 @@ void CombinerHelper::applyCombineMulToShl(MachineInstr &MI, bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData) { assert(MI.getOpcode() == TargetOpcode::G_SHL && KB); + if (!getTargetLowering().isDesirableToPullExtFromShl(MI)) + return false; Register LHS = MI.getOperand(1).getReg(); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index e015f68dabc69..bdde4b5e8e00f 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -690,6 +690,10 @@ class AArch64TargetLowering : public TargetLowering { bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override; + bool isDesirableToPullExtFromShl(const MachineInstr &MI) const override { + return false; + } + /// Returns false if N is a bit extraction pattern of (X >> C) & Mask. 
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override; diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp index 56e564638cdca..51c52aad35949 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp @@ -20,7 +20,9 @@ //===----------------------------------------------------------------------===// #include "AArch64TargetMachine.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/CodeGen/GlobalISel/CSEInfo.h" +#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h" #include "llvm/CodeGen/GlobalISel/Combiner.h" #include "llvm/CodeGen/GlobalISel/CombinerHelper.h" #include "llvm/CodeGen/GlobalISel/CombinerInfo.h" @@ -439,6 +441,22 @@ class AArch64PostLegalizerCombiner : public MachineFunctionPass { private: bool IsOptNone; AArch64PostLegalizerCombinerImplRuleConfig RuleConfig; + + + struct StoreInfo { + GStore *St = nullptr; + // The G_PTR_ADD that's used by the store. We keep this to cache the + // MachineInstr def. + GPtrAdd *Ptr = nullptr; + // The signed offset to the Ptr instruction. + int64_t Offset = 0; + LLT StoredType; + }; + bool tryOptimizeConsecStores(SmallVectorImpl<StoreInfo> &Stores, + CSEMIRBuilder &MIB); + + bool optimizeConsecutiveMemOpAddressing(MachineFunction &MF, + CSEMIRBuilder &MIB); }; } // end anonymous namespace @@ -492,7 +510,191 @@ bool AArch64PostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) { F.hasMinSize()); AArch64PostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *KB, CSEInfo, RuleConfig, ST, MDT, LI); - return Impl.combineMachineInstrs(); + bool Changed = Impl.combineMachineInstrs(); + + auto MIB = CSEMIRBuilder(MF); + MIB.setCSEInfo(CSEInfo); + Changed |= optimizeConsecutiveMemOpAddressing(MF, MIB); + return Changed; +} + +bool AArch64PostLegalizerCombiner::tryOptimizeConsecStores( + SmallVectorImpl<StoreInfo> &Stores, CSEMIRBuilder &MIB) { + if (Stores.size() <= 2) + return false; + + // Profitability checks: + int64_t BaseOffset = Stores[0].Offset; + unsigned NumPairsExpected = Stores.size() / 2; + unsigned TotalInstsExpected = NumPairsExpected + (Stores.size() % 2); + // Size savings will depend on whether we can fold the offset as an + // immediate of an ADD. + auto &TLI = *MIB.getMF().getSubtarget().getTargetLowering(); + if (!TLI.isLegalAddImmediate(BaseOffset)) + TotalInstsExpected++; + int SavingsExpected = Stores.size() - TotalInstsExpected; + if (SavingsExpected <= 0) + return false; + + auto &MRI = MIB.getMF().getRegInfo(); + + // We have a series of consecutive stores. Factor out the common base + // pointer and rewrite the offsets. + Register NewBase = Stores[0].Ptr->getReg(0); + for (auto &SInfo : Stores) { + // Compute a new pointer with the new base ptr and adjusted offset.
+ MIB.setInstrAndDebugLoc(*SInfo.St); + auto NewOff = MIB.buildConstant(LLT::scalar(64), SInfo.Offset - BaseOffset); + auto NewPtr = MIB.buildPtrAdd(MRI.getType(SInfo.St->getPointerReg()), + NewBase, NewOff); + if (MIB.getObserver()) + MIB.getObserver()->changingInstr(*SInfo.St); + SInfo.St->getOperand(1).setReg(NewPtr.getReg(0)); + if (MIB.getObserver()) + MIB.getObserver()->changedInstr(*SInfo.St); + } + LLVM_DEBUG(dbgs() << "Split a series of " << Stores.size() + << " stores into a base pointer and offsets.\n"); + return true; +} + +static cl::opt<bool> + EnableConsecutiveMemOpOpt("aarch64-postlegalizer-consecutive-memops", + cl::init(true), cl::Hidden, + cl::desc("Enable consecutive memop optimization " + "in AArch64PostLegalizerCombiner")); + +bool AArch64PostLegalizerCombiner::optimizeConsecutiveMemOpAddressing( + MachineFunction &MF, CSEMIRBuilder &MIB) { + // This combine needs to run after all reassociations/folds on pointer + // addressing have been done, specifically those that combine two G_PTR_ADDs + // with constant offsets into a single G_PTR_ADD with a combined offset. + // The goal of this optimization is to undo that combine in the case where + // doing so has prevented the formation of pair stores due to illegal + // addressing modes of STP. The reason that we do it here is because + // it's much easier to undo the transformation of a series of consecutive + // mem ops than it is to detect when doing it would be a bad idea looking + // at a single G_PTR_ADD in the reassociation/ptradd_immed_chain combine. + // + // An example: + // G_STORE %11:_(<2 x s64>), %base:_(p0) :: (store (<2 x s64>), align 1) + // %off1:_(s64) = G_CONSTANT i64 4128 + // %p1:_(p0) = G_PTR_ADD %base:_, %off1:_(s64) + // G_STORE %11:_(<2 x s64>), %p1:_(p0) :: (store (<2 x s64>), align 1) + // %off2:_(s64) = G_CONSTANT i64 4144 + // %p2:_(p0) = G_PTR_ADD %base:_, %off2:_(s64) + // G_STORE %11:_(<2 x s64>), %p2:_(p0) :: (store (<2 x s64>), align 1) + // %off3:_(s64) = G_CONSTANT i64 4160 + // %p3:_(p0) = G_PTR_ADD %base:_, %off3:_(s64) + // G_STORE %11:_(<2 x s64>), %p3:_(p0) :: (store (<2 x s64>), align 1) + bool Changed = false; + auto &MRI = MF.getRegInfo(); + + if (!EnableConsecutiveMemOpOpt) + return Changed; + + SmallVector<StoreInfo> Stores; + // If we see a load, then we keep track of any values defined by it. + // In the following example, STP formation will fail anyway because + // the latter store is using a load result that appears after + // the prior store. In this situation, if we factor out the offset then + // we increase code size for no benefit. + // G_STORE %v1:_(s64), %base:_(p0) :: (store (s64)) + // %v2:_(s64) = G_LOAD %ldptr:_(p0) :: (load (s64)) + // G_STORE %v2:_(s64), %base:_(p0) :: (store (s64)) + SmallVector<Register> LoadValsSinceLastStore; + + auto storeIsValid = [&](StoreInfo &Last, StoreInfo New) { + // Check if this store is consecutive to the last one. + if (Last.Ptr->getBaseReg() != New.Ptr->getBaseReg() || + (Last.Offset + static_cast<int64_t>(Last.StoredType.getSizeInBytes()) != + New.Offset) || + Last.StoredType != New.StoredType) + return false; + + // Check if this store is using a load result that appears after the + // last store. If so, bail out. + if (any_of(LoadValsSinceLastStore, [&](Register LoadVal) { + return New.St->getValueReg() == LoadVal; + })) + return false; + + // Check if the current offset would be too large for STP. + // If not, then STP formation should be able to handle it, so we don't + // need to do anything.
+ int64_t MaxLegalOffset; + switch (New.StoredType.getSizeInBits()) { + case 32: + MaxLegalOffset = 252; + break; + case 64: + MaxLegalOffset = 504; + break; + case 128: + MaxLegalOffset = 1008; + break; + default: + llvm_unreachable("Unexpected stored type size"); + } + if (New.Offset < MaxLegalOffset) + return false; + + // If factoring it out still wouldn't help then don't bother. + return New.Offset - Stores[0].Offset <= MaxLegalOffset; + }; + + auto resetState = [&]() { + Stores.clear(); + LoadValsSinceLastStore.clear(); + }; + + for (auto &MBB : MF) { + // We're looking inside a single BB at a time since the memset pattern + // should only be in a single block. + resetState(); + for (auto &MI : MBB) { + if (auto *St = dyn_cast<GStore>(&MI)) { + Register PtrBaseReg; + APInt Offset; + LLT StoredValTy = MRI.getType(St->getValueReg()); + unsigned ValSize = StoredValTy.getSizeInBits(); + if (ValSize < 32 || ValSize != St->getMMO().getSizeInBits()) + continue; + + Register PtrReg = St->getPointerReg(); + if (mi_match( + PtrReg, MRI, + m_OneNonDBGUse(m_GPtrAdd(m_Reg(PtrBaseReg), m_ICst(Offset))))) { + GPtrAdd *PtrAdd = cast<GPtrAdd>(MRI.getVRegDef(PtrReg)); + StoreInfo New = {St, PtrAdd, Offset.getSExtValue(), StoredValTy}; + + if (Stores.empty()) { + Stores.push_back(New); + continue; + } + + // Check if this store is a valid continuation of the sequence. + auto &Last = Stores.back(); + if (storeIsValid(Last, New)) { + Stores.push_back(New); + LoadValsSinceLastStore.clear(); // Reset the load value tracking. + } else { + // The store isn't valid to consider for the prior sequence, + // so try to optimize what we have so far and start a new sequence. + Changed |= tryOptimizeConsecStores(Stores, MIB); + resetState(); + Stores.push_back(New); + } + } + } else if (auto *Ld = dyn_cast<GLoad>(&MI)) { + LoadValsSinceLastStore.push_back(Ld->getDstReg()); + } + } + Changed |= tryOptimizeConsecStores(Stores, MIB); + resetState(); + } + + return Changed; } char AArch64PostLegalizerCombiner::ID = 0; diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp index bdfffc475c90a..2a80296688744 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp @@ -532,21 +532,15 @@ void AMDGPUInstPrinter::printDefaultVccOperand(bool FirstOperand, void AMDGPUInstPrinter::printWaitVDST(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O) { - uint8_t Imm = MI->getOperand(OpNo).getImm(); - if (Imm != 0) { - O << " wait_vdst:"; - printU4ImmDecOperand(MI, OpNo, O); - } + O << " wait_vdst:"; + printU4ImmDecOperand(MI, OpNo, O); } void AMDGPUInstPrinter::printWaitEXP(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O) { - uint8_t Imm = MI->getOperand(OpNo).getImm(); - if (Imm != 0) { - O << " wait_exp:"; - printU4ImmDecOperand(MI, OpNo, O); - } + O << " wait_exp:"; + printU4ImmDecOperand(MI, OpNo, O); } bool AMDGPUInstPrinter::needsImpliedVcc(const MCInstrDesc &Desc, diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/no-reduce-shl-of-ext.ll b/llvm/test/CodeGen/AArch64/GlobalISel/no-reduce-shl-of-ext.ll new file mode 100644 index 0000000000000..ab009cb7cc0e3 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/no-reduce-shl-of-ext.ll @@ -0,0 +1,19 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 +; RUN: llc %s -verify-machineinstrs -mtriple aarch64-apple-darwin -global-isel -o - | FileCheck
%s +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" + +%struct.mszip_stream = type { i32, i32, i8, i32, ptr, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr, i32, i32, i32, [288 x i8], [32 x i8], [1152 x i16], [128 x i16], [32768 x i8], ptr, ptr } + +define i16 @test(i32 %bit_buffer.6.lcssa, ptr %zip, ptr %.out) { +; CHECK-LABEL: test: +; CHECK: ; %bb.0: +; CHECK-NEXT: and w8, w0, #0x1ff +; CHECK-NEXT: add x8, x1, w8, uxtw #1 +; CHECK-NEXT: ldrh w0, [x8, #412] +; CHECK-NEXT: ret + %and274 = and i32 %bit_buffer.6.lcssa, 511 + %idxprom275 = zext i32 %and274 to i64 + %arrayidx276 = getelementptr inbounds %struct.mszip_stream, ptr %zip, i64 0, i32 19, i64 %idxprom275 + %ld = load i16, ptr %arrayidx276, align 2 + ret i16 %ld +} diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/split-offsets-for-stp.ll b/llvm/test/CodeGen/AArch64/GlobalISel/split-offsets-for-stp.ll new file mode 100644 index 0000000000000..6aaefff1f7240 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/split-offsets-for-stp.ll @@ -0,0 +1,353 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 +; RUN: llc -mtriple=aarch64-apple-ios -verify-machineinstrs -global-isel -aarch64-postlegalizer-consecutive-memops=0 < %s | FileCheck %s --check-prefix=CHECK-NO-SPLIT +; RUN: llc -mtriple=aarch64-apple-ios -verify-machineinstrs -global-isel < %s | FileCheck %s --check-prefix=CHECK-SPLIT + +define void @basic_split(ptr %p) { +; CHECK-NO-SPLIT-LABEL: basic_split: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8008] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8016] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8024] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8032] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8040] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: basic_split: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: mov w8, #8000 ; =0x1f40 +; CHECK-SPLIT-NEXT: add x8, x0, x8 +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8] +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8, #16] +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8, #32] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 1000 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 1001 + store i64 0, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 1002 + store i64 0, ptr %addr3 + %addr4 = getelementptr i64, ptr %p, i64 1003 + store i64 0, ptr %addr4 + %addr5 = getelementptr i64, ptr %p, i64 1004 + store i64 0, ptr %addr5 + %addr6 = getelementptr i64, ptr %p, i64 1005 + store i64 0, ptr %addr6 + ret void +} + +define void @basic_multi_use_ptr(ptr %p, ptr %p2) { +; CHECK-NO-SPLIT-LABEL: basic_multi_use_ptr: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: mov w8, #8008 ; =0x1f48 +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-NO-SPLIT-NEXT: add x8, x0, x8 +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8008] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8016] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8024] +; CHECK-NO-SPLIT-NEXT: str x8, [x1] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: basic_multi_use_ptr: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: mov w8, #8008 ; =0x1f48 +; CHECK-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-SPLIT-NEXT: add x8, x0, x8 +; CHECK-SPLIT-NEXT: str xzr, [x0, #8008] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8016] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8024] +; CHECK-SPLIT-NEXT: str x8, [x1] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 1000 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 1001 + 
store i64 0, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 1002 + store i64 0, ptr %addr3 + %addr4 = getelementptr i64, ptr %p, i64 1003 + store i64 0, ptr %addr4 + ; multiuse of %addr2 + store ptr %addr2, ptr %p2 + ret void +} + +define void @not_consecutive(ptr %p) { +; CHECK-NO-SPLIT-LABEL: not_consecutive: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8008] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8024] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8032] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: not_consecutive: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8008] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8024] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8032] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 1000 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 1001 + store i64 0, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 1003 + store i64 0, ptr %addr3 + %addr4 = getelementptr i64, ptr %p, i64 1004 + store i64 0, ptr %addr4 + ret void +} + +define void @early_store_is_invalid_but_split_rest(ptr %p) { +; CHECK-NO-SPLIT-LABEL: early_store_is_invalid_but_split_rest: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8080] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8016] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8024] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8032] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8040] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8048] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8056] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: early_store_is_invalid_but_split_rest: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: mov w8, #8016 ; =0x1f50 +; CHECK-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-SPLIT-NEXT: add x8, x0, x8 +; CHECK-SPLIT-NEXT: str xzr, [x0, #8080] +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8] +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8, #16] +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8, #32] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 1000 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 1010 + store i64 0, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 1002 + store i64 0, ptr %addr3 + %addr4 = getelementptr i64, ptr %p, i64 1003 + store i64 0, ptr %addr4 + %addr5 = getelementptr i64, ptr %p, i64 1004 + store i64 0, ptr %addr5 + %addr6 = getelementptr i64, ptr %p, i64 1005 + store i64 0, ptr %addr6 + %addr7 = getelementptr i64, ptr %p, i64 1006 + store i64 0, ptr %addr7 + %addr8 = getelementptr i64, ptr %p, i64 1007 + store i64 0, ptr %addr8 + ret void +} + +define void @vector(ptr %p) { +; CHECK-NO-SPLIT-LABEL: vector: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: movi.2d v0, #0000000000000000 +; CHECK-NO-SPLIT-NEXT: str q0, [x0, #16000] +; CHECK-NO-SPLIT-NEXT: str q0, [x0, #16016] +; CHECK-NO-SPLIT-NEXT: str q0, [x0, #16032] +; CHECK-NO-SPLIT-NEXT: str q0, [x0, #16048] +; CHECK-NO-SPLIT-NEXT: str q0, [x0, #16064] +; CHECK-NO-SPLIT-NEXT: str q0, [x0, #16080] +; CHECK-NO-SPLIT-NEXT: str q0, [x0, #16096] +; CHECK-NO-SPLIT-NEXT: str q0, [x0, #16112] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: vector: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: movi.2d v0, #0000000000000000 +; CHECK-SPLIT-NEXT: mov w8, #16000 ; =0x3e80 +; CHECK-SPLIT-NEXT: add x8, x0, x8 +; CHECK-SPLIT-NEXT: stp q0, q0, [x8] +; CHECK-SPLIT-NEXT: stp q0, q0, [x8, #32] +; CHECK-SPLIT-NEXT: stp q0, q0, [x8, #64] +; 
CHECK-SPLIT-NEXT: stp q0, q0, [x8, #96] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr <2 x i64>, ptr %p, i64 1000 + store <2 x i64> <i64 0, i64 0>, ptr %bigoffset + %addr2 = getelementptr <2 x i64>, ptr %p, i64 1001 + store <2 x i64> <i64 0, i64 0>, ptr %addr2 + %addr3 = getelementptr <2 x i64>, ptr %p, i64 1002 + store <2 x i64> <i64 0, i64 0>, ptr %addr3 + %addr4 = getelementptr <2 x i64>, ptr %p, i64 1003 + store <2 x i64> <i64 0, i64 0>, ptr %addr4 + %addr5 = getelementptr <2 x i64>, ptr %p, i64 1004 + store <2 x i64> <i64 0, i64 0>, ptr %addr5 + %addr6 = getelementptr <2 x i64>, ptr %p, i64 1005 + store <2 x i64> <i64 0, i64 0>, ptr %addr6 + %addr7 = getelementptr <2 x i64>, ptr %p, i64 1006 + store <2 x i64> <i64 0, i64 0>, ptr %addr7 + %addr8 = getelementptr <2 x i64>, ptr %p, i64 1007 + store <2 x i64> <i64 0, i64 0>, ptr %addr8 + ret void +} + +define void @can_already_form_stp(ptr %p) { +; CHECK-NO-SPLIT-LABEL: can_already_form_stp: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: stp xzr, xzr, [x0, #80] +; CHECK-NO-SPLIT-NEXT: stp xzr, xzr, [x0, #96] +; CHECK-NO-SPLIT-NEXT: stp xzr, xzr, [x0, #112] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: can_already_form_stp: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x0, #80] +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x0, #96] +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x0, #112] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 10 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 11 + store i64 0, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 12 + store i64 0, ptr %addr3 + %addr4 = getelementptr i64, ptr %p, i64 13 + store i64 0, ptr %addr4 + %addr5 = getelementptr i64, ptr %p, i64 14 + store i64 0, ptr %addr5 + %addr6 = getelementptr i64, ptr %p, i64 15 + store i64 0, ptr %addr6 + ret void +} + +define void @use_of_load_in_between(ptr %p, ptr %ldptr, ptr %ldptr2) { +; CHECK-NO-SPLIT-LABEL: use_of_load_in_between: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-NO-SPLIT-NEXT: ldr x8, [x1] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8016] +; CHECK-NO-SPLIT-NEXT: str x8, [x0, #8008] +; CHECK-NO-SPLIT-NEXT: ldr x8, [x2] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8032] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8040] +; CHECK-NO-SPLIT-NEXT: str x8, [x0, #8024] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: use_of_load_in_between: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-SPLIT-NEXT: ldr x8, [x1] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8016] +; CHECK-SPLIT-NEXT: str x8, [x0, #8008] +; CHECK-SPLIT-NEXT: ldr x8, [x2] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8032] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8040] +; CHECK-SPLIT-NEXT: str x8, [x0, #8024] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 1000 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 1001 + %ld = load i64, ptr %ldptr + store i64 %ld, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 1002 + store i64 0, ptr %addr3 + %addr4 = getelementptr i64, ptr %p, i64 1003 + %ld2 = load i64, ptr %ldptr2 + store i64 %ld2, ptr %addr4 + %addr5 = getelementptr i64, ptr %p, i64 1004 + store i64 0, ptr %addr5 + %addr6 = getelementptr i64, ptr %p, i64 1005 + store i64 0, ptr %addr6 + ret void +} + +define void @offset_legal_for_add_imm(ptr %p) { +; CHECK-NO-SPLIT-LABEL: offset_legal_for_add_imm: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #3200] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #3208] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #3216] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: offset_legal_for_add_imm: +;
CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: add x8, x0, #3200 +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8] +; CHECK-SPLIT-NEXT: str xzr, [x8, #16] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 400 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 401 + store i64 0, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 402 + store i64 0, ptr %addr3 + ret void +} + +define void @offset_illegal_for_add_imm(ptr %p) { +; CHECK-NO-SPLIT-LABEL: offset_illegal_for_add_imm: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8008] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8016] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: offset_illegal_for_add_imm: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8008] +; CHECK-SPLIT-NEXT: str xzr, [x0, #8016] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 1000 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 1001 + store i64 0, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 1002 + store i64 0, ptr %addr3 + ret void +} + +define void @offset_legal_for_add_imm_4_stores(ptr %p) { +; CHECK-NO-SPLIT-LABEL: offset_legal_for_add_imm_4_stores: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #3200] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #3208] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #3216] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #3224] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: offset_legal_for_add_imm_4_stores: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: add x8, x0, #3200 +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8] +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8, #16] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 400 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 401 + store i64 0, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 402 + store i64 0, ptr %addr3 + %addr4 = getelementptr i64, ptr %p, i64 403 + store i64 0, ptr %addr4 + ret void +} + +define void @offset_illegal_for_add_imm_4_stores(ptr %p) { +; CHECK-NO-SPLIT-LABEL: offset_illegal_for_add_imm_4_stores: +; CHECK-NO-SPLIT: ; %bb.0: +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8000] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8008] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8016] +; CHECK-NO-SPLIT-NEXT: str xzr, [x0, #8024] +; CHECK-NO-SPLIT-NEXT: ret +; +; CHECK-SPLIT-LABEL: offset_illegal_for_add_imm_4_stores: +; CHECK-SPLIT: ; %bb.0: +; CHECK-SPLIT-NEXT: mov w8, #8000 ; =0x1f40 +; CHECK-SPLIT-NEXT: add x8, x0, x8 +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8] +; CHECK-SPLIT-NEXT: stp xzr, xzr, [x8, #16] +; CHECK-SPLIT-NEXT: ret + %bigoffset = getelementptr i64, ptr %p, i64 1000 + store i64 0, ptr %bigoffset + %addr2 = getelementptr i64, ptr %p, i64 1001 + store i64 0, ptr %addr2 + %addr3 = getelementptr i64, ptr %p, i64 1002 + store i64 0, ptr %addr3 + %addr4 = getelementptr i64, ptr %p, i64 1003 + store i64 0, ptr %addr4 + ret void +} diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_ldsdir.s b/llvm/test/MC/AMDGPU/gfx11_asm_ldsdir.s index 8a8daab9a3a7e..9b1ba24053816 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_ldsdir.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_ldsdir.s @@ -46,10 +46,10 @@ lds_direct_load v15 wait_vdst:1 // GFX11: lds_direct_load v15 wait_vdst:1 ; encoding: [0x0f,0x00,0x11,0xce] lds_direct_load v16 wait_vdst:0 -// GFX11: lds_direct_load v16 ; encoding: [0x10,0x00,0x10,0xce] +// GFX11: lds_direct_load v16 wait_vdst:0 ; encoding: [0x10,0x00,0x10,0xce] 
lds_direct_load v17 -// GFX11: lds_direct_load v17 ; encoding: [0x11,0x00,0x10,0xce] +// GFX11: lds_direct_load v17 wait_vdst:0 ; encoding: [0x11,0x00,0x10,0xce] lds_param_load v1, attr0.x wait_vdst:15 // GFX11: lds_param_load v1, attr0.x wait_vdst:15 ; encoding: [0x01,0x00,0x0f,0xce] diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s b/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s index 0a3396b454b9c..e2e53776783f3 100644 --- a/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s +++ b/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s @@ -1,31 +1,31 @@ // RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -show-encoding %s | FileCheck -check-prefix=GFX11 %s v_interp_p10_f32 v0, v1, v2, v3 -// GFX11: v_interp_p10_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f32 v1, v10, v20, v30 -// GFX11: v_interp_p10_f32 v1, v10, v20, v30 ; encoding: [0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04] +// GFX11: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04] v_interp_p10_f32 v2, v11, v21, v31 -// GFX11: v_interp_p10_f32 v2, v11, v21, v31 ; encoding: [0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04] +// GFX11: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04] v_interp_p10_f32 v3, v12, v22, v32 -// GFX11: v_interp_p10_f32 v3, v12, v22, v32 ; encoding: [0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04] +// GFX11: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04] v_interp_p10_f32 v0, v1, v2, v3 clamp -// GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp ; encoding: [0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f32 v0, -v1, v2, v3 -// GFX11: v_interp_p10_f32 v0, -v1, v2, v3 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24] +// GFX11: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24] v_interp_p10_f32 v0, v1, -v2, v3 -// GFX11: v_interp_p10_f32 v0, v1, -v2, v3 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44] +// GFX11: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44] v_interp_p10_f32 v0, v1, v2, -v3 -// GFX11: v_interp_p10_f32 v0, v1, v2, -v3 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84] +// GFX11: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84] v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 -// GFX11: v_interp_p10_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1 // GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04] @@ -37,31 +37,31 @@ v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7 // GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f32 v0, v1, v2, v3 -// GFX11: v_interp_p2_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f32 v1, v10, v20, v30 -// GFX11: v_interp_p2_f32 v1, v10, v20, v30 ; encoding: [0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04] +// GFX11: v_interp_p2_f32 
v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04] v_interp_p2_f32 v2, v11, v21, v31 -// GFX11: v_interp_p2_f32 v2, v11, v21, v31 ; encoding: [0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04] +// GFX11: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04] v_interp_p2_f32 v3, v12, v22, v32 -// GFX11: v_interp_p2_f32 v3, v12, v22, v32 ; encoding: [0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04] +// GFX11: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04] v_interp_p2_f32 v0, v1, v2, v3 clamp -// GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp ; encoding: [0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f32 v0, -v1, v2, v3 -// GFX11: v_interp_p2_f32 v0, -v1, v2, v3 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24] +// GFX11: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24] v_interp_p2_f32 v0, v1, -v2, v3 -// GFX11: v_interp_p2_f32 v0, v1, -v2, v3 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44] +// GFX11: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44] v_interp_p2_f32 v0, v1, v2, -v3 -// GFX11: v_interp_p2_f32 v0, v1, v2, -v3 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84] +// GFX11: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84] v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 -// GFX11: v_interp_p2_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1 // GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04] @@ -73,22 +73,22 @@ v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7 // GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, -v1, v2, v3 -// GFX11: v_interp_p10_f16_f32 v0, -v1, v2, v3 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24] +// GFX11: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24] v_interp_p10_f16_f32 v0, v1, -v2, v3 -// GFX11: v_interp_p10_f16_f32 v0, v1, -v2, v3 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44] +// GFX11: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44] v_interp_p10_f16_f32 v0, v1, v2, -v3 -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, -v3 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84] +// GFX11: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84] v_interp_p10_f16_f32 v0, v1, v2, v3 clamp -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp ; encoding: [0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: 
v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1 // GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04] @@ -97,22 +97,22 @@ v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7 // GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0] -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] ; encoding: [0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] ; encoding: [0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] ; encoding: [0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] ; encoding: [0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] -// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] ; encoding: [0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 // GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x02,0xcd,0x01,0x05,0x0e,0x04] @@ -124,22 +124,22 @@ v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 // GFX11: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4] v_interp_p2_f16_f32 v0, v1, v2, v3 -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, -v1, v2, v3 -// GFX11: v_interp_p2_f16_f32 v0, -v1, v2, v3 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24] +// GFX11: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24] v_interp_p2_f16_f32 v0, v1, -v2, v3 -// GFX11: v_interp_p2_f16_f32 v0, v1, -v2, v3 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44] +// GFX11: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44] v_interp_p2_f16_f32 v0, v1, v2, -v3 -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, -v3 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, -v3 
wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84] v_interp_p2_f16_f32 v0, v1, v2, v3 clamp -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp ; encoding: [0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1 // GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04] @@ -148,22 +148,22 @@ v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7 // GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0] -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] ; encoding: [0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] ; encoding: [0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] ; encoding: [0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] ; encoding: [0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] -// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] ; encoding: [0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 // GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x03,0xcd,0x01,0x05,0x0e,0x04] @@ -175,22 +175,22 @@ v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 // GFX11: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 -// GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24] +// GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 
wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24] v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44] v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp ; encoding: [0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 // GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04] @@ -199,22 +199,22 @@ v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 // GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0] -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] ; encoding: [0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] ; encoding: [0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] ; encoding: [0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] ; encoding: [0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] -// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] ; encoding: [0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04] v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 // GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x04,0xcd,0x01,0x05,0x0e,0x04] @@ -226,22 +226,22 
@@ v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 // GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 -// GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24] +// GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24] v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44] v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp ; encoding: [0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 // GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04] @@ -250,22 +250,22 @@ v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 // GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0] -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] ; encoding: [0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] ; encoding: [0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] ; encoding: [0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] ; encoding: [0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: 
v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] -// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] ; encoding: [0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04] +// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04] v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 // GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x05,0xcd,0x01,0x05,0x0e,0x04] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_ldsdir.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_ldsdir.txt index 018af4646c1f9..d9803fdfd7ac2 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_ldsdir.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_ldsdir.txt @@ -19,10 +19,10 @@ # GFX11: lds_direct_load v15 wait_vdst:1 ; encoding: [0x0f,0x00,0x11,0xce] 0x0f,0x00,0x11,0xce -# GFX11: lds_direct_load v16 ; encoding: [0x10,0x00,0x10,0xce] +# GFX11: lds_direct_load v16 wait_vdst:0 ; encoding: [0x10,0x00,0x10,0xce] 0x10,0x00,0x10,0xce -# GFX11: lds_direct_load v17 ; encoding: [0x11,0x00,0x10,0xce] +# GFX11: lds_direct_load v17 wait_vdst:0 ; encoding: [0x11,0x00,0x10,0xce] 0x11,0x00,0x10,0xce # GFX11: lds_direct_load v1 wait_vdst:15 ; encoding: [0x01,0x00,0x1f,0xce] @@ -70,10 +70,10 @@ # GFX11: lds_param_load v15, attr63.z wait_vdst:1 ; encoding: [0x0f,0xfe,0x01,0xce] 0x0f,0xfe,0x01,0xce -# GFX11: lds_param_load v16, attr63.w ; encoding: [0x10,0xff,0x00,0xce] +# GFX11: lds_param_load v16, attr63.w wait_vdst:0 ; encoding: [0x10,0xff,0x00,0xce] 0x10,0xff,0x00,0xce -# GFX11: lds_param_load v17, attr63.w ; encoding: [0x11,0xff,0x00,0xce] +# GFX11: lds_param_load v17, attr63.w wait_vdst:0 ; encoding: [0x11,0xff,0x00,0xce] 0x11,0xff,0x00,0xce # GFX11: lds_param_load v1, attr0.x wait_vdst:15 ; encoding: [0x01,0x00,0x0f,0xce] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt index da1f129fcf40e..b22fd5e289fae 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt @@ -1,31 +1,31 @@ # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble %s | FileCheck -strict-whitespace -check-prefix=GFX11 %s -# GFX11: v_interp_p10_f32 v0, v1, v2, v3{{$}} +# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04 # Check that unused bits in the encoding are ignored. 
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3{{$}} +# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x80,0xcd,0x01,0x05,0x0e,0x1c -# GFX11: v_interp_p10_f32 v1, v10, v20, v30{{$}} +# GFX11: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0{{$}} 0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04 -# GFX11: v_interp_p10_f32 v2, v11, v21, v31{{$}} +# GFX11: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0{{$}} 0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04 -# GFX11: v_interp_p10_f32 v3, v12, v22, v32{{$}} +# GFX11: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0{{$}} 0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04 -# GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp{{$}} +# GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}} 0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_f32 v0, -v1, v2, v3{{$}} +# GFX11: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24 -# GFX11: v_interp_p10_f32 v0, v1, -v2, v3{{$}} +# GFX11: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0{{$}} 0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44 -# GFX11: v_interp_p10_f32 v0, v1, v2, -v3{{$}} +# GFX11: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0{{$}} 0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84 # GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1{{$}} @@ -37,28 +37,28 @@ # GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}} 0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_f32 v0, v1, v2, v3{{$}} +# GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_f32 v1, v10, v20, v30{{$}} +# GFX11: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0{{$}} 0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04 -# GFX11: v_interp_p2_f32 v2, v11, v21, v31{{$}} +# GFX11: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0{{$}} 0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04 -# GFX11: v_interp_p2_f32 v3, v12, v22, v32{{$}} +# GFX11: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0{{$}} 0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04 -# GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp{{$}} +# GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}} 0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_f32 v0, -v1, v2, v3{{$}} +# GFX11: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24 -# GFX11: v_interp_p2_f32 v0, v1, -v2, v3{{$}} +# GFX11: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0{{$}} 0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44 -# GFX11: v_interp_p2_f32 v0, v1, v2, -v3{{$}} +# GFX11: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0{{$}} 0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84 # GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1{{$}} @@ -70,19 +70,19 @@ # GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}} 0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3{{$}} +# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_f16_f32 v0, -v1, v2, v3{{$}} +# GFX11: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24 -# GFX11: v_interp_p10_f16_f32 v0, v1, -v2, v3{{$}} +# GFX11: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}} 0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44 -# GFX11: v_interp_p10_f16_f32 v0, v1, v2, -v3{{$}} +# GFX11: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}} 0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84 -# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp{{$}} +# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}} 0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04 # GFX11: v_interp_p10_f16_f32 v0, v1, 
v2, v3 wait_exp:1{{$}} @@ -91,19 +91,19 @@ # GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}} 0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]{{$}} +# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}} 0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]{{$}} +# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}} 0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]{{$}} +# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}} 0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]{{$}} +# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}} 0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]{{$}} +# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}} 0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04 # GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}} @@ -115,19 +115,19 @@ # GFX11: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}} 0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4 -# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3{{$}} +# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_f16_f32 v0, -v1, v2, v3{{$}} +# GFX11: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24 -# GFX11: v_interp_p2_f16_f32 v0, v1, -v2, v3{{$}} +# GFX11: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}} 0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44 -# GFX11: v_interp_p2_f16_f32 v0, v1, v2, -v3{{$}} +# GFX11: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}} 0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84 -# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp{{$}} +# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}} 0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04 # GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}} @@ -136,19 +136,19 @@ # GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}} 0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]{{$}} +# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}} 0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]{{$}} +# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}} 0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]{{$}} +# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}} 0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]{{$}} +# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}} 0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]{{$}} +# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}} 0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04 # GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}} @@ -160,19 +160,19 @@ # GFX11: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}} 0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4 -# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}} 
0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24 -# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}} 0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44 -# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}} 0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84 -# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}} 0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04 # GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}} @@ -181,19 +181,19 @@ # GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}} 0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}} 0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}} 0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}} 0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}} 0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]{{$}} +# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}} 0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04 # GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}} @@ -205,19 +205,19 @@ # GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}} 0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4 -# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}} 0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24 -# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}} 0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44 -# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}} 0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84 -# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}} 0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04 # GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}} @@ -226,19 +226,19 @@ # GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}} 0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}} 0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}} 0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 
op_sel:[0,0,1,0]{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}} 0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}} 0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04 -# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]{{$}} +# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}} 0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04 # GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}} diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td index e671c5e323a09..19d7f599c5f75 100644 --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td @@ -14,22 +14,15 @@ include "mlir/IR/EnumAttr.td" include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td" include "mlir/IR/TensorEncoding.td" -// All of the Tensor attributes will extend this class. +// All of the sparse tensor attributes will extend this class. class SparseTensor_Attr traits = []> : AttrDef; //===----------------------------------------------------------------------===// -// Type aliases. -// -// These attributes are just like `IndexAttr` (include/mlir/IR/OpBase.td), -// except that: -// (1) the `summary` is more specific (i.e., the fourth parameter to -// `TypedAttrBase`), which helps tablegen provide better error messages. -// (2) tablegen-generated getters will have the given `returnType`, in -// lieu of the `APInt` that `IndexAttr` uses. This avoids the boilerplate -// of needing to say `get{FOO}().getZExtValue()`, as well as using -// C++ types which better document intent. +// These attributes are just like `IndexAttr` except that they clarify whether +// the index refers to a dimension (an axis of the semantic tensor) or a level +// (an axis of the actual storage format). //===----------------------------------------------------------------------===// def DimensionAttr : @@ -107,79 +100,71 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding", let mnemonic = "encoding"; let description = [{ - An attribute to encode TACO-style information on sparsity properties - of tensors. The encoding is eventually used by a **sparse compiler** - pass to generate sparse code fully automatically for all tensor - expressions that involve tensors with a sparse encoding. Compiler - passes that run before this sparse compiler pass need to be - aware of the semantics of tensor types with such an encoding. - - Each sparse tensor comes equipped with two different sets of axes for - describing the tensor's multi-dimensional structure. We use the term - "dimension" to refer to the axes of the semantic tensor itself; whereas, - we use the term "level" to refer to the axes of the storage scheme, - which is the operational representation of that tensor. Therefore, - the fields of the encoding attribute (further explained below) satisfy - the following correspondences: - - - Dimensions: - - the shape of the tensor type - - the `dimSlices` field - - the arguments of the `dimToLvl` field - - Levels: - - the results of the `dimToLvl` field - - the `lvlTypes` field - - The attribute consists of the following fields. - - - Level-type for each level of a tensor type: - - **dense** : all entries along this level are stored. 
- - **compressed** : only nonzeros along this level are stored. - - **singleton** : a variant of the compressed level-format, - for when coordinates are guaranteed to have no siblings at this level. - By default, each level-type has the property of being unique (no - duplicates at that level) and ordered (coordinates appear sorted - at that level). The following two suffixes can be used to specify - that the level should instead be non-unique (duplicates may appear) - and/or non-ordered (coordinates may appear unsorted). - - **-nu** : not unique - - **-no** : not ordered - Currently, these suffixes (if present) must appear in this order. - In the future, we may introduce additional level-types and - properties, and split up how the level-format and properties are - specified rather than using this suffix mechanism. - - - An optional affine map from dimension-coordinates to level-coordinates; - defaulting to the identity map. For example, given a 2-d tensor: - `(i, j) -> (i, j)` specifies row-wise storage, `(i, j) -> (j, i)` - specifies column-wise storage, and - `(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)` - specifies 2x3 block-sparsity. For block-sparsity, blocks are typically - stored with compression while dense storage is used within each block - (although hybrid schemes are possible as well). - - (The following will be corrected in an upcoming change that completely - overhauls the syntax of this attribute.) - - The dimToLvl mapping also provides a notion of "counting a - dimension", where every stored element with the same coordinate - is mapped to a new slice. For instance, ELL storage of a 2-d - tensor can be defined with the mapping `(i, j) -> (#i, i, j)` - using the notation of [Chou20]. Lacking the `#` symbol in MLIR's - affine mapping, we use a free symbol `c` to define such counting, - together with a constant that denotes the number of resulting - slices. For example, the mapping `(i, j)[c] -> (c * 3 * i, i, j)` - with the level-types `["dense", "dense", "compressed"]` denotes ELL - storage with three jagged diagonals that count the dimension `i`. - - - The required bitwidth for "position" storage (integral offsets + An attribute to encode information on sparsity properties of tensors, inspired + by the TACO formalization of sparse tensors. This encoding is eventually used + by a **sparsifier** pass to generate sparse code fully automatically from a + sparsity-agnostic representation of the computation, i.e., an implicit sparse + representation is converted to an explicit sparse representation where co-iterating + loops operate on sparse storage formats rather than tensors with a sparsity + encoding. Compiler passes that run before this sparse compiler pass need to + be aware of the semantics of tensor types with such a sparsity encoding. + + In this encoding, we use `dimension` to refer to the axes of the semantic tensor, + and `level` to refer to the axes of the actual storage format, i.e., the + operational representation of the sparse tensor in memory. The number of + dimensions is usually the same as the number of levels (such as CSR storage format). + However, the encoding can also map dimensions to higher-order levels (for example, + to encode a block-sparse BSR storage format) or to lower-order levels + (for example, to linearize dimensions as a single level in the storage). 
+ + The encoding contains a `map` that provides the following: + + - An ordered sequence of dimension specifications, each of which defines: + - the dimension-size (implicit from the tensor’s dimension-shape) + - a **dimension-expression** + - An ordered sequence of level specifications, each of which includes a required + **level-type**, which defines how the level should be stored. Each level-type + consists of: + - a **level-format** + - a collection of **level-properties** that apply to the level-format + - a **level-expression**, which defines what is stored + + Each level-expression is an affine expression over dimension-variables. Thus, the + level-expressions collectively define an affine map from dimension-coordinates to + level-coordinates. The dimension-expressions collectively define the inverse map, + which only needs to be provided for elaborate cases where it cannot be inferred + automatically. Within the sparse storage format, we refer to indices that are + stored explicitly as `coordinates` and indices into the storage format as `positions`. + + The supported level-formats are the following: + + - **dense** : all entries along this level are stored + - **compressed** : only nonzeros along this level are stored + - **singleton** : a variant of the compressed format, where coordinates have no siblings + + Different level-formats may have different collections of level-properties. + By default, each level-type has the property of being unique (no duplicate + coordinates at that level), ordered (coordinates appear sorted at that + level), and, for compression, storing the positions in a compact way where + an interval is defined by a lower bound "pos(i)" and an upper bound "pos(i+1)-1". + The following properties can be added to a level-format to change this + default behavior: + + - **nonunique** : duplicate coordinates may appear at the level + - **nonordered** : coordinates may appear in arbitrary order + - **high** : the upper bound is stored explicitly in a separate array + - **block2_4** : the compression uses a 2:4 encoding per 1x4 block + + In addition to the `map`, the following two fields are optional: + + - The required bitwidth for `position` storage (integral offsets into the sparse storage scheme). A narrow width reduces the memory footprint of overhead storage, as long as the width suffices to define the total required range (viz. the maximum number of stored entries over all indirection levels). The choices are `8`, `16`, `32`, `64`, or, the default, `0` to indicate the native bitwidth. - - The required bitwidth for "coordinate" storage (the coordinates + - The required bitwidth for `coordinate` storage (the coordinates of stored entries). A narrow width reduces the memory footprint of overhead storage, as long as the width suffices to define the total required range (viz. the maximum value of each tensor @@ -194,26 +179,38 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding", ```mlir // Sparse vector. #SparseVector = #sparse_tensor.encoding<{ - map = (d0) -> (d0 : compressed) + map = (i) -> (i : compressed) }> ... tensor ... - // Sorted Coordinate Scheme. + // Sorted coordinate scheme. #SortedCOO = #sparse_tensor.encoding<{ - map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) + map = (i, j) -> (i : compressed(nonunique), j : singleton) }> ... tensor ... + // Batched sorted coordinate scheme, with high encoding.
+ #BCOO = #sparse_tensor.encoding<{ + map = (i, j, k) -> (i : dense, j : compressed(nonunique, high), k : singleton) + }> + ... tensor<10x10xf32, #BCOO> ... + + // Compressed sparse row. + #CSR = #sparse_tensor.encoding<{ + map = (i, j) -> (i : dense, j : compressed) + }> + ... tensor<100x100xbf16, #CSR> ... + // Doubly compressed sparse column storage with specific bitwidths. #DCSC = #sparse_tensor.encoding<{ - map = (d0, d1) -> (d1 : compressed, d0 : compressed), + map = (i, j) -> (j : compressed, i : compressed), posWidth = 32, crdWidth = 8 }> ... tensor<8x8xf64, #DCSC> ... // Block sparse row storage (2x3 blocks). - #BCSR = #sparse_tensor.encoding<{ + #BSR = #sparse_tensor.encoding<{ map = ( i, j ) -> ( i floordiv 2 : compressed, j floordiv 3 : compressed, @@ -221,14 +218,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding", j mod 3 : dense ) }> - ... tensor<20x30xf32, #BCSR> ... - - // ELL storage (4 jagged diagonals, i.e., at most 4 nonzeros per row). - #ELL = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "compressed" ], - dimToLvl = affine_map<(i, j)[c] -> (c * 4 * i, i, j)> - }> - ... tensor ... + ... tensor<20x30xf32, #BSR> ... // CSR slice (offset = 0, size = 4, stride = 1 on the first dimension; // offset = 0, size = 8, and a dynamic stride on the second dimension). @@ -444,7 +434,6 @@ def AnyRankedSparseTensor : RankedSparseTensorOf<[AnyType]>; class ScalarLikeOf allowedTypes> : AnyTypeOf<[0DTensorOf, AnyTypeOf]>; - //===----------------------------------------------------------------------===// // Sparse Tensor Sorting Algorithm Attribute. //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h index fcf7eb4a616b0..fc0c80036ff79 100644 --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h @@ -131,6 +131,24 @@ inline bool isReductionIterator(Attribute attr) { return cast(attr).getValue() == IteratorType::reduction; } +/// Returns the integer numbers in `values`. `values` are expected to be +/// constant operations. +SmallVector getAsIntegers(ArrayRef values); + +/// Returns the integer numbers in `foldResults`. `foldResults` are expected to +/// be constant operations. +SmallVector getAsIntegers(ArrayRef foldResults); + +/// Convert `foldResults` into Values. Integer attributes are converted to +/// constant op. +SmallVector getAsValues(OpBuilder &builder, Location loc, + ArrayRef foldResults); + +/// Returns the constant index ops in `values`. `values` are expected to be +/// constant operations. 
+SmallVector +getAsConstantIndexOps(ArrayRef values); + //===----------------------------------------------------------------------===// // Vector Masking Utilities //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td index 701eefcc1e7da..ea96f26601268 100644 --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td @@ -523,9 +523,7 @@ def Vector_ExtractOp : Vector_Op<"extract", [Pure, PredOpTrait<"operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, - InferTypeOpAdaptorWithIsCompatible]>, - Arguments<(ins AnyVectorOfAnyRank:$vector, DenseI64ArrayAttr:$position)>, - Results<(outs AnyType)> { + InferTypeOpAdaptorWithIsCompatible]> { let summary = "extract operation"; let description = [{ Takes an n-D vector and a k-D position and extracts the (n-k)-D vector at @@ -535,21 +533,55 @@ def Vector_ExtractOp : ```mlir %1 = vector.extract %0[3]: vector<4x8x16xf32> - %2 = vector.extract %0[3, 3, 3]: vector<4x8x16xf32> + %2 = vector.extract %0[2, 1, 3]: vector<4x8x16xf32> %3 = vector.extract %1[]: vector + %4 = vector.extract %0[%a, %b, %c]: vector<4x8x16xf32> + %5 = vector.extract %0[2, %b]: vector<4x8x16xf32> ``` }]; + + let arguments = (ins + AnyVectorOfAnyRank:$vector, + Variadic:$dynamic_position, + DenseI64ArrayAttr:$static_position + ); + let results = (outs AnyType:$result); + let builders = [ - // Convenience builder which assumes the values in `position` are defined by - // ConstantIndexOp. - OpBuilder<(ins "Value":$source, "ValueRange":$position)> + OpBuilder<(ins "Value":$source, "int64_t":$position)>, + OpBuilder<(ins "Value":$source, "OpFoldResult":$position)>, + OpBuilder<(ins "Value":$source, "ArrayRef":$position)>, + OpBuilder<(ins "Value":$source, "ArrayRef":$position)>, ]; + let extraClassDeclaration = [{ VectorType getSourceVectorType() { return ::llvm::cast(getVector().getType()); } + + /// Return a vector with all the static and dynamic position indices. 
+ SmallVector getMixedPosition() { + OpBuilder builder(getContext()); + return getMixedValues(getStaticPosition(), getDynamicPosition(), builder); + } + + unsigned getNumIndices() { + return getStaticPosition().size(); + } + + bool hasDynamicPosition() { + auto dynPos = getDynamicPosition(); + return std::any_of(dynPos.begin(), dynPos.end(), + [](Value operand) { return operand != nullptr; }); + } }]; - let assemblyFormat = "$vector `` $position attr-dict `:` type($vector)"; + + let assemblyFormat = [{ + $vector `` + custom($dynamic_position, $static_position) + attr-dict `:` type($vector) + }]; + let hasCanonicalizer = 1; let hasFolder = 1; let hasVerifier = 1; @@ -638,9 +670,7 @@ def Vector_InsertOp : Vector_Op<"insert", [Pure, PredOpTrait<"source operand and result have same element type", TCresVTEtIsSameAsOpBase<0, 0>>, - AllTypesMatch<["dest", "res"]>]>, - Arguments<(ins AnyType:$source, AnyVectorOfAnyRank:$dest, DenseI64ArrayAttr:$position)>, - Results<(outs AnyVectorOfAnyRank:$res)> { + AllTypesMatch<["dest", "result"]>]> { let summary = "insert operation"; let description = [{ Takes an n-D source vector, an (n+k)-D destination vector and a k-D position @@ -651,24 +681,53 @@ def Vector_InsertOp : ```mlir %2 = vector.insert %0, %1[3] : vector<8x16xf32> into vector<4x8x16xf32> - %5 = vector.insert %3, %4[3, 3, 3] : f32 into vector<4x8x16xf32> + %5 = vector.insert %3, %4[2, 1, 3] : f32 into vector<4x8x16xf32> %8 = vector.insert %6, %7[] : f32 into vector - %11 = vector.insert %9, %10[3, 3, 3] : vector into vector<4x8x16xf32> + %11 = vector.insert %9, %10[%a, %b, %c] : vector into vector<4x8x16xf32> + %12 = vector.insert %4, %10[2, %b] : vector<16xf32> into vector<4x8x16xf32> ``` }]; - let assemblyFormat = [{ - $source `,` $dest $position attr-dict `:` type($source) `into` type($dest) - }]; + + let arguments = (ins + AnyType:$source, + AnyVectorOfAnyRank:$dest, + Variadic:$dynamic_position, + DenseI64ArrayAttr:$static_position + ); + let results = (outs AnyVectorOfAnyRank:$result); let builders = [ - // Convenience builder which assumes all values are constant indices. - OpBuilder<(ins "Value":$source, "Value":$dest, "ValueRange":$position)> + OpBuilder<(ins "Value":$source, "Value":$dest, "int64_t":$position)>, + OpBuilder<(ins "Value":$source, "Value":$dest, "OpFoldResult":$position)>, + OpBuilder<(ins "Value":$source, "Value":$dest, "ArrayRef":$position)>, + OpBuilder<(ins "Value":$source, "Value":$dest, "ArrayRef":$position)>, ]; + let extraClassDeclaration = [{ Type getSourceType() { return getSource().getType(); } VectorType getDestVectorType() { return ::llvm::cast(getDest().getType()); } + + /// Return a vector with all the static and dynamic position indices. 
+ SmallVector getMixedPosition() { + OpBuilder builder(getContext()); + return getMixedValues(getStaticPosition(), getDynamicPosition(), builder); + } + + unsigned getNumIndices() { + return getStaticPosition().size(); + } + + bool hasDynamicPosition() { + return llvm::any_of(getDynamicPosition(), + [](Value operand) { return operand != nullptr; }); + } + }]; + + let assemblyFormat = [{ + $source `,` $dest custom($dynamic_position, $static_position) + attr-dict `:` type($source) `into` type($dest) }]; let hasCanonicalizer = 1; diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp index 2c08257fc3089..3f77c5b5f24e9 100644 --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -126,6 +126,18 @@ static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc, return rewriter.create(loc, pType, ptr); } +/// Convert `foldResult` into a Value. Integer attribute is converted to +/// an LLVM constant op. +static Value getAsLLVMValue(OpBuilder &builder, Location loc, + OpFoldResult foldResult) { + if (auto attr = foldResult.dyn_cast()) { + auto intAttr = cast(attr); + return builder.create(loc, intAttr).getResult(); + } + + return foldResult.get(); +} + namespace { /// Trivial Vector to LLVM conversions @@ -1079,41 +1091,53 @@ class VectorExtractOpConversion auto loc = extractOp->getLoc(); auto resultType = extractOp.getResult().getType(); auto llvmResultType = typeConverter->convertType(resultType); - ArrayRef positionArray = extractOp.getPosition(); - // Bail if result type cannot be lowered. if (!llvmResultType) return failure(); + SmallVector positionVec; + for (auto [idx, pos] : llvm::enumerate(extractOp.getMixedPosition())) { + if (pos.is()) + // Make sure we use the value that has been already converted to LLVM. + positionVec.push_back(adaptor.getDynamicPosition()[idx]); + else + positionVec.push_back(pos); + } + // Extract entire vector. Should be handled by folder, but just to be safe. - if (positionArray.empty()) { + ArrayRef position(positionVec); + if (position.empty()) { rewriter.replaceOp(extractOp, adaptor.getVector()); return success(); } // One-shot extraction of vector from array (only requires extractvalue). if (isa(resultType)) { + if (extractOp.hasDynamicPosition()) + return failure(); + Value extracted = rewriter.create( - loc, adaptor.getVector(), positionArray); + loc, adaptor.getVector(), getAsIntegers(position)); rewriter.replaceOp(extractOp, extracted); return success(); } // Potential extraction of 1-D vector from array. Value extracted = adaptor.getVector(); - if (positionArray.size() > 1) { - extracted = rewriter.create( - loc, extracted, positionArray.drop_back()); - } + if (position.size() > 1) { + if (extractOp.hasDynamicPosition()) + return failure(); - // Remaining extraction of element from 1-D LLVM vector - auto i64Type = IntegerType::get(rewriter.getContext(), 64); - auto constant = - rewriter.create(loc, i64Type, positionArray.back()); - extracted = - rewriter.create(loc, extracted, constant); - rewriter.replaceOp(extractOp, extracted); + SmallVector nMinusOnePosition = + getAsIntegers(position.drop_back()); + extracted = rewriter.create(loc, extracted, + nMinusOnePosition); + } + Value lastPosition = getAsLLVMValue(rewriter, loc, position.back()); + // Remaining extraction of element from 1-D LLVM vector. 
+ rewriter.replaceOpWithNewOp(extractOp, extracted, + lastPosition); return success(); } }; @@ -1194,23 +1218,34 @@ class VectorInsertOpConversion auto sourceType = insertOp.getSourceType(); auto destVectorType = insertOp.getDestVectorType(); auto llvmResultType = typeConverter->convertType(destVectorType); - ArrayRef positionArray = insertOp.getPosition(); - // Bail if result type cannot be lowered. if (!llvmResultType) return failure(); + SmallVector positionVec; + for (auto [idx, pos] : llvm::enumerate(insertOp.getMixedPosition())) { + if (pos.is()) + // Make sure we use the value that has been already converted to LLVM. + positionVec.push_back(adaptor.getDynamicPosition()[idx]); + else + positionVec.push_back(pos); + } + // Overwrite entire vector with value. Should be handled by folder, but // just to be safe. - if (positionArray.empty()) { + ArrayRef position(positionVec); + if (position.empty()) { rewriter.replaceOp(insertOp, adaptor.getSource()); return success(); } // One-shot insertion of a vector into an array (only requires insertvalue). if (isa(sourceType)) { + if (insertOp.hasDynamicPosition()) + return failure(); + Value inserted = rewriter.create( - loc, adaptor.getDest(), adaptor.getSource(), positionArray); + loc, adaptor.getDest(), adaptor.getSource(), getAsIntegers(position)); rewriter.replaceOp(insertOp, inserted); return success(); } @@ -1218,24 +1253,28 @@ class VectorInsertOpConversion // Potential extraction of 1-D vector from array. Value extracted = adaptor.getDest(); auto oneDVectorType = destVectorType; - if (positionArray.size() > 1) { + if (position.size() > 1) { + if (insertOp.hasDynamicPosition()) + return failure(); + oneDVectorType = reducedVectorTypeBack(destVectorType); extracted = rewriter.create( - loc, extracted, positionArray.drop_back()); + loc, extracted, getAsIntegers(position.drop_back())); } // Insertion of an element into a 1-D LLVM vector. - auto i64Type = IntegerType::get(rewriter.getContext(), 64); - auto constant = - rewriter.create(loc, i64Type, positionArray.back()); Value inserted = rewriter.create( loc, typeConverter->convertType(oneDVectorType), extracted, - adaptor.getSource(), constant); + adaptor.getSource(), getAsLLVMValue(rewriter, loc, position.back())); // Potential insertion of resulting 1-D vector into array. - if (positionArray.size() > 1) { + if (position.size() > 1) { + if (insertOp.hasDynamicPosition()) + return failure(); + inserted = rewriter.create( - loc, adaptor.getDest(), inserted, positionArray.drop_back()); + loc, adaptor.getDest(), inserted, + getAsIntegers(position.drop_back())); } rewriter.replaceOp(insertOp, inserted); diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp index 1aeed4594f945..f8fd89c542c06 100644 --- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp +++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp @@ -1063,10 +1063,11 @@ struct UnrollTransferReadConversion /// If the result of the TransferReadOp has exactly one user, which is a /// vector::InsertOp, return that operation's indices. void getInsertionIndices(TransferReadOp xferOp, - SmallVector &indices) const { - if (auto insertOp = getInsertOp(xferOp)) - indices.assign(insertOp.getPosition().begin(), - insertOp.getPosition().end()); + SmallVectorImpl &indices) const { + if (auto insertOp = getInsertOp(xferOp)) { + auto pos = insertOp.getMixedPosition(); + indices.append(pos.begin(), pos.end()); + } } /// Rewrite the op: Unpack one dimension. 
Can handle masks, out-of-bounds @@ -1110,9 +1111,9 @@ struct UnrollTransferReadConversion getXferIndices(b, xferOp, iv, xferIndices); // Indices for the new vector.insert op. - SmallVector insertionIndices; + SmallVector insertionIndices; getInsertionIndices(xferOp, insertionIndices); - insertionIndices.push_back(i); + insertionIndices.push_back(rewriter.getIndexAttr(i)); auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr()); auto newXferOp = b.create( @@ -1195,10 +1196,11 @@ struct UnrollTransferWriteConversion /// If the input of the given TransferWriteOp is an ExtractOp, return its /// indices. void getExtractionIndices(TransferWriteOp xferOp, - SmallVector &indices) const { - if (auto extractOp = getExtractOp(xferOp)) - indices.assign(extractOp.getPosition().begin(), - extractOp.getPosition().end()); + SmallVectorImpl &indices) const { + if (auto extractOp = getExtractOp(xferOp)) { + auto pos = extractOp.getMixedPosition(); + indices.append(pos.begin(), pos.end()); + } } /// Rewrite the op: Unpack one dimension. Can handle masks, out-of-bounds @@ -1235,9 +1237,9 @@ struct UnrollTransferWriteConversion getXferIndices(b, xferOp, iv, xferIndices); // Indices for the new vector.extract op. - SmallVector extractionIndices; + SmallVector extractionIndices; getExtractionIndices(xferOp, extractionIndices); - extractionIndices.push_back(i); + extractionIndices.push_back(b.getI64IntegerAttr(i)); auto extracted = b.create(loc, vec, extractionIndices); diff --git a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp index a8c68abc8bcbf..9b29179f36871 100644 --- a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp +++ b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp @@ -35,11 +35,25 @@ using namespace mlir; -/// Gets the first integer value from `attr`, assuming it is an integer array -/// attribute. +/// Returns the integer value from the first valid input element, assuming Value +/// inputs are defined by constant index ops and Attribute inputs are integer +/// attributes. +static uint64_t getFirstIntValue(ValueRange values) { + return values[0].getDefiningOp().value(); +} +static uint64_t getFirstIntValue(ArrayRef attr) { + return cast(attr[0]).getInt(); +} static uint64_t getFirstIntValue(ArrayAttr attr) { return (*attr.getAsValueRange().begin()).getZExtValue(); } +static uint64_t getFirstIntValue(ArrayRef foldResults) { + auto attr = foldResults[0].dyn_cast(); + if (attr) + return getFirstIntValue(attr); + + return getFirstIntValue(ValueRange{foldResults[0].get()}); +} /// Returns the number of bits for the given scalar/vector type. static int getNumBits(Type type) { @@ -141,9 +155,7 @@ struct VectorExtractOpConvert final LogicalResult matchAndRewrite(vector::ExtractOp extractOp, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - // Only support extracting a scalar value now.
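Throughout these conversions a position entry is inspected as an OpFoldResult: an Attribute means a static index, a Value means a dynamic one, and patterns that cannot handle the dynamic case bail out early. A minimal C++ sketch of that dispatch (illustrative helper, not part of the diff):

```cpp
#include <optional>

#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/OpDefinition.h"

using namespace mlir;

// Sketch only: a position entry is either an Attribute (a static index) or a
// Value (a dynamic index). Lowerings that only handle the static case guard
// on this before reading the integer.
static std::optional<int64_t> getStaticIndex(OpFoldResult pos) {
  if (auto attr = pos.dyn_cast<Attribute>())
    return cast<IntegerAttr>(attr).getInt();
  return std::nullopt; // Dynamic: pos.get<Value>() yields the SSA index.
}
```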
- VectorType resultVectorType = dyn_cast(extractOp.getType()); - if (resultVectorType && resultVectorType.getNumElements() > 1) + if (extractOp.hasDynamicPosition()) return failure(); Type dstType = getTypeConverter()->convertType(extractOp.getType()); @@ -155,7 +167,7 @@ struct VectorExtractOpConvert final return success(); } - int32_t id = extractOp.getPosition()[0]; + int32_t id = getFirstIntValue(extractOp.getMixedPosition()); rewriter.replaceOpWithNewOp( extractOp, adaptor.getVector(), id); return success(); @@ -235,7 +247,7 @@ struct VectorInsertOpConvert final return success(); } - int32_t id = insertOp.getPosition()[0]; + int32_t id = getFirstIntValue(insertOp.getMixedPosition()); rewriter.replaceOpWithNewOp( insertOp, adaptor.getSource(), adaptor.getDest(), id); return success(); diff --git a/mlir/lib/Dialect/Arith/Transforms/IntNarrowing.cpp b/mlir/lib/Dialect/Arith/Transforms/IntNarrowing.cpp index 906c13a6579f1..1084fbc890053 100644 --- a/mlir/lib/Dialect/Arith/Transforms/IntNarrowing.cpp +++ b/mlir/lib/Dialect/Arith/Transforms/IntNarrowing.cpp @@ -516,7 +516,7 @@ struct ExtensionOverExtract final : NarrowingPattern { return failure(); Value newExtract = rewriter.create( - op.getLoc(), ext->getIn(), op.getPosition()); + op.getLoc(), ext->getIn(), op.getMixedPosition()); ext->recreateAndReplace(rewriter, op, newExtract); return success(); } @@ -645,8 +645,9 @@ struct ExtensionOverInsert final vector::InsertOp origInsert, Value narrowValue, Value narrowDest) const override { - return rewriter.create( - origInsert.getLoc(), narrowValue, narrowDest, origInsert.getPosition()); + return rewriter.create(origInsert.getLoc(), narrowValue, + narrowDest, + origInsert.getMixedPosition()); } }; diff --git a/mlir/lib/Dialect/NVGPU/Transforms/CreateAsyncGroups.cpp b/mlir/lib/Dialect/NVGPU/Transforms/CreateAsyncGroups.cpp index ad2180d501148..f63825cdc8f61 100644 --- a/mlir/lib/Dialect/NVGPU/Transforms/CreateAsyncGroups.cpp +++ b/mlir/lib/Dialect/NVGPU/Transforms/CreateAsyncGroups.cpp @@ -74,7 +74,7 @@ static FailureOr getMaskOp(Operation *loadOp) { if (auto maskOp = extractOp.getVector().getDefiningOp()) return TransferMask{maskOp, - SmallVector(extractOp.getPosition())}; + SmallVector(extractOp.getStaticPosition())}; // All other cases: not supported. return failure(); diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp index 7b9c5f9b879e8..85d21938d0ab7 100644 --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -223,6 +223,48 @@ static LogicalResult incSlicePosition(MutableArrayRef position, return failure(); } +/// Returns the integer numbers in `values`. `values` are expected to be +/// constant operations. +SmallVector vector::getAsIntegers(ArrayRef values) { + SmallVector ints; + llvm::transform(values, std::back_inserter(ints), [](Value value) { + auto constOp = value.getDefiningOp(); + assert(constOp && "Unexpected non-constant index"); + return constOp.value(); + }); + return ints; +} + +/// Returns the integer numbers in `foldResults`. `foldResults` are expected to +/// be constant operations. +SmallVector vector::getAsIntegers(ArrayRef foldResults) { + SmallVector ints; + llvm::transform( + foldResults, std::back_inserter(ints), [](OpFoldResult foldResult) { + assert(foldResult.is() && "Unexpected non-constant index"); + return cast(foldResult.get()).getInt(); + }); + return ints; +} + +/// Convert `foldResults` into Values. Integer attributes are converted to +/// constant op. 
+SmallVector vector::getAsValues(OpBuilder &builder, Location loc, + ArrayRef foldResults) { + SmallVector values; + llvm::transform(foldResults, std::back_inserter(values), + [&](OpFoldResult foldResult) { + if (auto attr = foldResult.dyn_cast()) + return builder + .create( + loc, cast(attr).getInt()) + .getResult(); + + return foldResult.get(); + }); + return values; +} + //===----------------------------------------------------------------------===// // CombiningKindAttr //===----------------------------------------------------------------------===// @@ -389,12 +431,11 @@ struct ElideUnitDimsInMultiDimReduction } else { // This means we are reducing all the dimensions, and all reduction // dimensions are of size 1. So a simple extraction would do. - SmallVector zeroAttr(shape.size(), 0); + SmallVector zeroIdx(shape.size(), 0); if (mask) - mask = rewriter.create(loc, rewriter.getI1Type(), - mask, zeroAttr); - cast = rewriter.create( - loc, reductionOp.getDestType(), reductionOp.getSource(), zeroAttr); + mask = rewriter.create(loc, mask, zeroIdx); + cast = rewriter.create(loc, reductionOp.getSource(), + zeroIdx); } Value result = vector::makeArithReduction( @@ -574,11 +615,9 @@ struct ElideSingleElementReduction : public OpRewritePattern { mask = rewriter.create(loc, mask); result = rewriter.create(loc, reductionOp.getVector()); } else { - if (mask) { - mask = rewriter.create(loc, rewriter.getI1Type(), mask, 0); - } - result = rewriter.create(loc, reductionOp.getType(), - reductionOp.getVector(), 0); + if (mask) + mask = rewriter.create(loc, mask, 0); + result = rewriter.create(loc, reductionOp.getVector(), 0); } if (Value acc = reductionOp.getAcc()) @@ -1148,12 +1187,29 @@ OpFoldResult vector::ExtractElementOp::fold(FoldAdaptor adaptor) { // ExtractOp //===----------------------------------------------------------------------===// -// Convenience builder which assumes the values are constant indices. 
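The builders that follow let callers pass positions as plain integers or as OpFoldResults. As a side illustration (not part of the diff), a minimal sketch of how a rewrite pattern might build a vector.extract with one constant and one dynamic index; `rewriter`, `loc`, `srcVec`, and `dynIdx` are assumed, illustrative names:

```cpp
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Sketch only: build a vector.extract with one constant and one dynamic index
// through the new OpFoldResult-based builder.
static Value extractWithMixedPosition(PatternRewriter &rewriter, Location loc,
                                      Value srcVec, Value dynIdx) {
  SmallVector<OpFoldResult> position;
  position.push_back(rewriter.getIndexAttr(2)); // Static index 2.
  position.push_back(dynIdx);                   // Dynamic index value.
  // The builder splits `position` into the static_position attribute and the
  // dynamic_position operands.
  return rewriter.create<vector::ExtractOp>(loc, srcVec, position).getResult();
}
```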
void vector::ExtractOp::build(OpBuilder &builder, OperationState &result, - Value source, ValueRange position) { - SmallVector positionConstants = llvm::to_vector(llvm::map_range( - position, [](Value pos) { return getConstantIntValue(pos).value(); })); - build(builder, result, source, positionConstants); + Value source, int64_t position) { + build(builder, result, source, ArrayRef{position}); +} + +void vector::ExtractOp::build(OpBuilder &builder, OperationState &result, + Value source, OpFoldResult position) { + build(builder, result, source, ArrayRef{position}); +} + +void vector::ExtractOp::build(OpBuilder &builder, OperationState &result, + Value source, ArrayRef position) { + build(builder, result, source, /*dynamic_position=*/ArrayRef(), + builder.getDenseI64ArrayAttr(position)); +} + +void vector::ExtractOp::build(OpBuilder &builder, OperationState &result, + Value source, ArrayRef position) { + SmallVector staticPos; + SmallVector dynamicPos; + dispatchIndexOpFoldResults(position, dynamicPos, staticPos); + build(builder, result, source, dynamicPos, + builder.getDenseI64ArrayAttr(staticPos)); } LogicalResult @@ -1161,12 +1217,12 @@ ExtractOp::inferReturnTypes(MLIRContext *, std::optional, ExtractOp::Adaptor adaptor, SmallVectorImpl &inferredReturnTypes) { auto vectorType = llvm::cast(adaptor.getVector().getType()); - if (static_cast(adaptor.getPosition().size()) == + if (static_cast(adaptor.getStaticPosition().size()) == vectorType.getRank()) { inferredReturnTypes.push_back(vectorType.getElementType()); } else { - auto n = - std::min(adaptor.getPosition().size(), vectorType.getRank()); + auto n = std::min(adaptor.getStaticPosition().size(), + vectorType.getRank()); inferredReturnTypes.push_back(VectorType::get( vectorType.getShape().drop_front(n), vectorType.getElementType(), vectorType.getScalableDims().drop_front(n))); @@ -1188,17 +1244,20 @@ bool ExtractOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) { } LogicalResult vector::ExtractOp::verify() { - ArrayRef position = getPosition(); + auto position = getMixedPosition(); if (position.size() > static_cast(getSourceVectorType().getRank())) return emitOpError( "expected position attribute of rank no greater than vector rank"); - for (const auto &en : llvm::enumerate(position)) { - if (en.value() < 0 || - en.value() >= getSourceVectorType().getDimSize(en.index())) - return emitOpError("expected position attribute #") - << (en.index() + 1) - << " to be a non-negative integer smaller than the corresponding " - "vector dimension"; + for (auto [idx, pos] : llvm::enumerate(position)) { + if (pos.is()) { + int64_t constIdx = cast(pos.get()).getInt(); + if (constIdx < 0 || constIdx >= getSourceVectorType().getDimSize(idx)) { + return emitOpError("expected position attribute #") + << (idx + 1) + << " to be a non-negative integer smaller than the " + "corresponding vector dimension"; + } + } } return success(); } @@ -1216,20 +1275,24 @@ static LogicalResult foldExtractOpFromExtractChain(ExtractOp extractOp) { if (!extractOp.getVector().getDefiningOp()) return failure(); - SmallVector globalPosition; + // TODO: Canonicalization for dynamic position not implemented yet. 
+ if (extractOp.hasDynamicPosition()) + return failure(); + + SmallVector globalPosition; ExtractOp currentOp = extractOp; - ArrayRef extrPos = currentOp.getPosition(); + ArrayRef extrPos = currentOp.getStaticPosition(); globalPosition.append(extrPos.rbegin(), extrPos.rend()); while (ExtractOp nextOp = currentOp.getVector().getDefiningOp()) { currentOp = nextOp; - ArrayRef extrPos = currentOp.getPosition(); + ArrayRef extrPos = currentOp.getStaticPosition(); globalPosition.append(extrPos.rbegin(), extrPos.rend()); } - extractOp.setOperand(currentOp.getVector()); + extractOp.setOperand(0, currentOp.getVector()); // OpBuilder is only used as a helper to build an I64ArrayAttr. OpBuilder b(extractOp.getContext()); std::reverse(globalPosition.begin(), globalPosition.end()); - extractOp.setPosition(globalPosition); + extractOp.setStaticPosition(globalPosition); return success(); } @@ -1335,19 +1398,23 @@ class ExtractFromInsertTransposeChainState { ExtractFromInsertTransposeChainState::ExtractFromInsertTransposeChainState( ExtractOp e) : extractOp(e), vectorRank(extractOp.getSourceVectorType().getRank()), - extractedRank(extractOp.getPosition().size()) { - assert(vectorRank >= extractedRank && "extracted pos overflow"); + extractedRank(extractOp.getNumIndices()) { + assert(vectorRank >= extractedRank && "Extracted position overflow"); sentinels.reserve(vectorRank - extractedRank); for (int64_t i = 0, e = vectorRank - extractedRank; i < e; ++i) sentinels.push_back(-(i + 1)); - extractPosition.assign(extractOp.getPosition().begin(), - extractOp.getPosition().end()); + extractPosition.assign(extractOp.getStaticPosition().begin(), + extractOp.getStaticPosition().end()); llvm::append_range(extractPosition, sentinels); } // Case 1. If we hit a transpose, just compose the map and iterate. // Invariant: insert + transpose do not change rank, we can always compose. LogicalResult ExtractFromInsertTransposeChainState::handleTransposeOp() { + // TODO: Canonicalization for dynamic position not implemented yet. + if (extractOp.hasDynamicPosition()) + return failure(); + if (!nextTransposeOp) return failure(); auto permutation = extractVector(nextTransposeOp.getTransp()); @@ -1361,7 +1428,11 @@ LogicalResult ExtractFromInsertTransposeChainState::handleTransposeOp() { LogicalResult ExtractFromInsertTransposeChainState::handleInsertOpWithMatchingPos( Value &res) { - ArrayRef insertedPos = nextInsertOp.getPosition(); + // TODO: Canonicalization for dynamic position not implemented yet. + if (extractOp.hasDynamicPosition() || nextInsertOp.hasDynamicPosition()) + return failure(); + + ArrayRef insertedPos = nextInsertOp.getStaticPosition(); if (insertedPos != llvm::ArrayRef(extractPosition).take_front(extractedRank)) return failure(); // Case 2.a. early-exit fold. @@ -1375,7 +1446,11 @@ ExtractFromInsertTransposeChainState::handleInsertOpWithMatchingPos( /// This method updates the internal state. LogicalResult ExtractFromInsertTransposeChainState::handleInsertOpWithPrefixPos(Value &res) { - ArrayRef insertedPos = nextInsertOp.getPosition(); + // TODO: Canonicalization for dynamic position not implemented yet. + if (extractOp.hasDynamicPosition() || nextInsertOp.hasDynamicPosition()) + return failure(); + + ArrayRef insertedPos = nextInsertOp.getStaticPosition(); if (!isContainedWithin(insertedPos, extractPosition)) return failure(); // Set leading dims to zero. @@ -1395,19 +1470,29 @@ ExtractFromInsertTransposeChainState::handleInsertOpWithPrefixPos(Value &res) { /// internal tranposition in the result). 
Value ExtractFromInsertTransposeChainState::tryToFoldExtractOpInPlace( Value source) { + // TODO: Canonicalization for dynamic position not implemented yet. + if (extractOp.hasDynamicPosition()) + return Value(); + // If we can't fold (either internal transposition, or nothing to fold), bail. bool nothingToFold = (source == extractOp.getVector()); if (nothingToFold || !canFold()) return Value(); + // Otherwise, fold by updating the op inplace and return its result. OpBuilder b(extractOp.getContext()); - extractOp.setPosition(ArrayRef(extractPosition).take_front(extractedRank)); + extractOp.setStaticPosition( + ArrayRef(extractPosition).take_front(extractedRank)); extractOp.getVectorMutable().assign(source); return extractOp.getResult(); } /// Iterate over producing insert and transpose ops until we find a fold. Value ExtractFromInsertTransposeChainState::fold() { + // TODO: Canonicalization for dynamic position not implemented yet. + if (extractOp.hasDynamicPosition()) + return Value(); + Value valueToExtractFrom = extractOp.getVector(); updateStateForNextIteration(valueToExtractFrom); while (nextInsertOp || nextTransposeOp) { @@ -1431,7 +1516,7 @@ Value ExtractFromInsertTransposeChainState::fold() { // Case 4: extractPositionRef intersects insertedPosRef on non-sentinel // values. This is a more difficult case and we bail. - ArrayRef insertedPos = nextInsertOp.getPosition(); + ArrayRef insertedPos = nextInsertOp.getStaticPosition(); if (isContainedWithin(extractPosition, insertedPos) || intersectsWhereNonNegative(extractPosition, insertedPos)) return Value(); @@ -1457,6 +1542,10 @@ static bool hasZeroDimVectors(Operation *op) { /// Fold extractOp with scalar result coming from BroadcastOp or SplatOp. static Value foldExtractFromBroadcast(ExtractOp extractOp) { + // TODO: Canonicalization for dynamic position not implemented yet. + if (extractOp.hasDynamicPosition()) + return Value(); + Operation *defOp = extractOp.getVector().getDefiningOp(); if (!defOp || !isa(defOp)) return Value(); @@ -1497,7 +1586,7 @@ static Value foldExtractFromBroadcast(ExtractOp extractOp) { // extract position to `0` when extracting from the source operand. llvm::SetVector broadcastedUnitDims = broadcastOp.computeBroadcastedUnitDims(); - SmallVector extractPos(extractOp.getPosition()); + SmallVector extractPos(extractOp.getStaticPosition()); int64_t broadcastRankDiff = broadcastDstRank - broadcastSrcRank; for (int64_t i = broadcastRankDiff, e = extractPos.size(); i < e; ++i) if (broadcastedUnitDims.contains(i)) @@ -1509,13 +1598,17 @@ static Value foldExtractFromBroadcast(ExtractOp extractOp) { std::next(extractPos.begin(), extractPos.size() - rankDiff)); // OpBuilder is only used as a helper to build an I64ArrayAttr. OpBuilder b(extractOp.getContext()); - extractOp.setOperand(source); - extractOp.setPosition(extractPos); + extractOp.setOperand(0, source); + extractOp.setStaticPosition(extractPos); return extractOp.getResult(); } // Fold extractOp with source coming from ShapeCast op. static Value foldExtractFromShapeCast(ExtractOp extractOp) { + // TODO: Canonicalization for dynamic position not implemented yet. + if (extractOp.hasDynamicPosition()) + return Value(); + auto shapeCastOp = extractOp.getVector().getDefiningOp(); if (!shapeCastOp) return Value(); @@ -1549,7 +1642,7 @@ static Value foldExtractFromShapeCast(ExtractOp extractOp) { } // Extract the strides associated with the extract op vector source. Then use // this to calculate a linearized position for the extract. 
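To make the stride computation mentioned in the comment above concrete, here is a small worked sketch (not part of the diff) using the helpers from mlir/Dialect/Utils/IndexingUtils.h and assumed shapes:

```cpp
#include "mlir/Dialect/Utils/IndexingUtils.h"

using namespace mlir;

// Sketch only: folding `vector.extract %e[1, 2]` where
//   %e = vector.shape_cast %s : vector<12x5xf32> to vector<3x4x5xf32>
// The extracted chunk keeps the trailing dimension of size 5.
void shapeCastLinearizationExample() {
  // Linearize [1, 2] over the leading result dims {3, 4}: 1 * 4 + 2 = 6.
  int64_t linear = linearize({1, 2}, computeStrides({3, 4}));
  // Delinearize 6 over the leading source dim {12}: new position {6}.
  SmallVector<int64_t> newPos = delinearize(linear, computeStrides({12}));
  // The fold thus rewrites the op to `vector.extract %s[6]`, which yields the
  // same vector<5xf32> chunk directly from the shape_cast source.
  (void)newPos;
}
```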
- SmallVector extractedPos(extractOp.getPosition()); + SmallVector extractedPos(extractOp.getStaticPosition()); std::reverse(extractedPos.begin(), extractedPos.end()); SmallVector strides; int64_t stride = 1; @@ -1575,13 +1668,17 @@ static Value foldExtractFromShapeCast(ExtractOp extractOp) { SmallVector newPosition = delinearize(position, newStrides); // OpBuilder is only used as a helper to build an I64ArrayAttr. OpBuilder b(extractOp.getContext()); - extractOp.setPosition(newPosition); - extractOp.setOperand(shapeCastOp.getSource()); + extractOp.setStaticPosition(newPosition); + extractOp.setOperand(0, shapeCastOp.getSource()); return extractOp.getResult(); } /// Fold an ExtractOp from ExtractStridedSliceOp. static Value foldExtractFromExtractStrided(ExtractOp extractOp) { + // TODO: Canonicalization for dynamic position not implemented yet. + if (extractOp.hasDynamicPosition()) + return Value(); + auto extractStridedSliceOp = extractOp.getVector().getDefiningOp(); if (!extractStridedSliceOp) @@ -1615,19 +1712,25 @@ static Value foldExtractFromExtractStrided(ExtractOp extractOp) { if (destinationRank > extractStridedSliceOp.getSourceVectorType().getRank() - sliceOffsets.size()) return Value(); - SmallVector extractedPos(extractOp.getPosition()); + + SmallVector extractedPos(extractOp.getStaticPosition()); assert(extractedPos.size() >= sliceOffsets.size()); for (size_t i = 0, e = sliceOffsets.size(); i < e; i++) extractedPos[i] = extractedPos[i] + sliceOffsets[i]; extractOp.getVectorMutable().assign(extractStridedSliceOp.getVector()); + // OpBuilder is only used as a helper to build an I64ArrayAttr. OpBuilder b(extractOp.getContext()); - extractOp.setPosition(extractedPos); + extractOp.setStaticPosition(extractedPos); return extractOp.getResult(); } /// Fold extract_op fed from a chain of insertStridedSlice ops. static Value foldExtractStridedOpFromInsertChain(ExtractOp extractOp) { + // TODO: Canonicalization for dynamic position not implemented yet. + if (extractOp.hasDynamicPosition()) + return Value(); + int64_t destinationRank = llvm::isa(extractOp.getType()) ? llvm::cast(extractOp.getType()).getRank() @@ -1647,7 +1750,7 @@ static Value foldExtractStridedOpFromInsertChain(ExtractOp extractOp) { if (destinationRank > insertOp.getSourceVectorType().getRank()) return Value(); auto insertOffsets = extractVector(insertOp.getOffsets()); - ArrayRef extractOffsets = extractOp.getPosition(); + ArrayRef extractOffsets = extractOp.getStaticPosition(); if (llvm::any_of(insertOp.getStrides(), [](Attribute attr) { return llvm::cast(attr).getInt() != 1; @@ -1687,7 +1790,7 @@ static Value foldExtractStridedOpFromInsertChain(ExtractOp extractOp) { extractOp.getVectorMutable().assign(insertOp.getSource()); // OpBuilder is only used as a helper to build an I64ArrayAttr. OpBuilder b(extractOp.getContext()); - extractOp.setPosition(offsetDiffs); + extractOp.setStaticPosition(offsetDiffs); return extractOp.getResult(); } // If the chunk extracted is disjoint from the chunk inserted, keep @@ -1698,7 +1801,7 @@ static Value foldExtractStridedOpFromInsertChain(ExtractOp extractOp) { } OpFoldResult ExtractOp::fold(FoldAdaptor) { - if (getPosition().empty()) + if (getNumIndices() == 0) return getVector(); if (succeeded(foldExtractOpFromExtractChain(*this))) return getResult(); @@ -1788,6 +1891,10 @@ class ExtractOpNonSplatConstantFolder final LogicalResult matchAndRewrite(ExtractOp extractOp, PatternRewriter &rewriter) const override { + // TODO: Canonicalization for dynamic position not implemented yet. 
+ if (extractOp.hasDynamicPosition()) + return failure(); + // Return if 'ExtractOp' operand is not defined by a compatible vector // ConstantOp. Value sourceVector = extractOp.getVector(); @@ -1807,7 +1914,7 @@ class ExtractOpNonSplatConstantFolder final // Calculate the linearized position of the continuous chunk of elements to // extract. llvm::SmallVector completePositions(vecTy.getRank(), 0); - copy(extractOp.getPosition(), completePositions.begin()); + copy(extractOp.getStaticPosition(), completePositions.begin()); int64_t elemBeginPosition = linearize(completePositions, computeStrides(vecTy.getShape())); auto denseValuesBegin = dense.value_begin() + elemBeginPosition; @@ -2322,18 +2429,38 @@ OpFoldResult vector::InsertElementOp::fold(FoldAdaptor adaptor) { // InsertOp //===----------------------------------------------------------------------===// -// Convenience builder which assumes the values are constant indices. -void InsertOp::build(OpBuilder &builder, OperationState &result, Value source, - Value dest, ValueRange position) { - SmallVector positionConstants = - llvm::to_vector<4>(llvm::map_range(position, [](Value pos) { - return getConstantIntValue(pos).value(); - })); - build(builder, result, source, dest, positionConstants); +void vector::InsertOp::build(OpBuilder &builder, OperationState &result, + Value source, Value dest, int64_t position) { + build(builder, result, source, dest, ArrayRef{position}); +} + +void vector::InsertOp::build(OpBuilder &builder, OperationState &result, + Value source, Value dest, OpFoldResult position) { + build(builder, result, source, dest, ArrayRef{position}); +} + +void vector::InsertOp::build(OpBuilder &builder, OperationState &result, + Value source, Value dest, + ArrayRef position) { + SmallVector posVals; + posVals.reserve(position.size()); + llvm::transform(position, std::back_inserter(posVals), + [&](int64_t pos) { return builder.getI64IntegerAttr(pos); }); + build(builder, result, source, dest, posVals); +} + +void vector::InsertOp::build(OpBuilder &builder, OperationState &result, + Value source, Value dest, + ArrayRef position) { + SmallVector staticPos; + SmallVector dynamicPos; + dispatchIndexOpFoldResults(position, dynamicPos, staticPos); + build(builder, result, source, dest, dynamicPos, + builder.getDenseI64ArrayAttr(staticPos)); } LogicalResult InsertOp::verify() { - ArrayRef position = getPosition(); + SmallVector position = getMixedPosition(); auto destVectorType = getDestVectorType(); if (position.size() > static_cast(destVectorType.getRank())) return emitOpError( @@ -2348,13 +2475,17 @@ LogicalResult InsertOp::verify() { (position.size() != static_cast(destVectorType.getRank()))) return emitOpError( "expected position attribute rank to match the dest vector rank"); - for (const auto &en : llvm::enumerate(position)) { - int64_t attr = en.value(); - if (attr < 0 || attr >= destVectorType.getDimSize(en.index())) - return emitOpError("expected position attribute #") - << (en.index() + 1) - << " to be a non-negative integer smaller than the corresponding " - "dest vector dimension"; + for (auto [idx, pos] : llvm::enumerate(position)) { + if (auto attr = pos.dyn_cast()) { + int64_t constIdx = cast(attr).getInt(); + if (constIdx < 0 || constIdx >= destVectorType.getDimSize(idx)) { + return emitOpError("expected position attribute #") + << (idx + 1) + << " to be a non-negative integer smaller than the " + "corresponding " + "dest vector dimension"; + } + } } return success(); } @@ -2411,6 +2542,10 @@ class InsertOpConstantFolder 
final : public OpRewritePattern { LogicalResult matchAndRewrite(InsertOp op, PatternRewriter &rewriter) const override { + // TODO: Canonicalization for dynamic position not implemented yet. + if (op.hasDynamicPosition()) + return failure(); + // Return if 'InsertOp' operand is not defined by a compatible vector // ConstantOp. TypedValue destVector = op.getDest(); @@ -2437,7 +2572,7 @@ class InsertOpConstantFolder final : public OpRewritePattern { // Calculate the linearized position of the continuous chunk of elements to // insert. llvm::SmallVector completePositions(destTy.getRank(), 0); - copy(op.getPosition(), completePositions.begin()); + copy(op.getStaticPosition(), completePositions.begin()); int64_t insertBeginPosition = linearize(completePositions, computeStrides(destTy.getShape())); @@ -2468,7 +2603,7 @@ void InsertOp::getCanonicalizationPatterns(RewritePatternSet &results, // value. This happens when the source and destination vectors have identical // sizes. OpFoldResult vector::InsertOp::fold(FoldAdaptor adaptor) { - if (getPosition().empty()) + if (getNumIndices() == 0) return getSource(); return {}; } diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp index 64ab0abda26e6..7560db2332cf8 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp @@ -89,20 +89,20 @@ static Value reshapeLoad(Location loc, Value val, VectorType type, PatternRewriter &rewriter) { if (index == -1) return val; - Type lowType = type.getRank() > 1 ? VectorType::Builder(type).dropDim(0) - : type.getElementType(); + // At extraction dimension? if (index == 0) - return rewriter.create(loc, lowType, val, pos); + return rewriter.create(loc, val, pos); + // Unroll leading dimensions. - VectorType vType = cast(lowType); + VectorType vType = VectorType::Builder(type).dropDim(0); VectorType resType = VectorType::Builder(type).dropDim(index); Value result = rewriter.create( loc, resType, rewriter.getZeroAttr(resType)); for (int64_t d = 0, e = resType.getDimSize(0); d < e; d++) { - Value ext = rewriter.create(loc, vType, val, d); + Value ext = rewriter.create(loc, val, d); Value load = reshapeLoad(loc, ext, vType, index - 1, pos, rewriter); - result = rewriter.create(loc, resType, load, result, d); + result = rewriter.create(loc, load, result, d); } return result; } @@ -117,16 +117,15 @@ static Value reshapeStore(Location loc, Value val, Value result, return val; // At insertion dimension? if (index == 0) - return rewriter.create(loc, type, val, result, pos); + return rewriter.create(loc, val, result, pos); + // Unroll leading dimensions. - VectorType lowType = VectorType::Builder(type).dropDim(0); - Type insType = lowType.getRank() > 1 ? 
VectorType::Builder(lowType).dropDim(0) - : lowType.getElementType(); + VectorType vType = VectorType::Builder(type).dropDim(0); for (int64_t d = 0, e = type.getDimSize(0); d < e; d++) { - Value ext = rewriter.create(loc, lowType, result, d); - Value ins = rewriter.create(loc, insType, val, d); - Value sto = reshapeStore(loc, ins, ext, lowType, index - 1, pos, rewriter); - result = rewriter.create(loc, type, sto, result, d); + Value ext = rewriter.create(loc, result, d); + Value ins = rewriter.create(loc, val, d); + Value sto = reshapeStore(loc, ins, ext, vType, index - 1, pos, rewriter); + result = rewriter.create(loc, sto, result, d); } return result; } @@ -1175,7 +1174,7 @@ class OuterProductOpLowering : public OpRewritePattern { loc, a, op.getRhs(), r, kind, rewriter, isInt, extrMask); if (!m.has_value()) return failure(); - result = rewriter.create(loc, resType, *m, result, d); + result = rewriter.create(loc, *m, result, d); } rewriter.replaceOp(rootOp, result); diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp index 95b5ea011c825..887d1af764541 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp @@ -79,7 +79,7 @@ class CreateMaskOpLowering : public OpRewritePattern { Value val = rewriter.create(loc, arith::CmpIPredicate::slt, bnd, idx); Value sel = rewriter.create(loc, val, trueVal, falseVal); - result = rewriter.create(loc, dstType, sel, result, d); + result = rewriter.create(loc, sel, result, d); } rewriter.replaceOp(op, result); return success(); @@ -151,8 +151,8 @@ class ConstantMaskOpLowering : public OpRewritePattern { Value result = rewriter.create( loc, dstType, rewriter.getZeroAttr(dstType)); for (int64_t d = 0; d < trueDimSize; d++) - result = - rewriter.create(loc, dstType, trueVal, result, d); + result = rewriter.create(loc, trueVal, result, d); + rewriter.replaceOp(op, result); return success(); } diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp index 2a50947e976df..f4486ea117a29 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp @@ -1040,13 +1040,17 @@ struct WarpOpExtract : public OpRewritePattern { "vector.extract does not support rank 0 sources"); // "vector.extract %v[] : vector<...xf32>" can be canonicalized to %v. - if (extractOp.getPosition().empty()) + if (extractOp.getNumIndices() == 0) return failure(); // Rewrite vector.extract with 1d source to vector.extractelement. if (extractSrcType.getRank() == 1) { - assert(extractOp.getPosition().size() == 1 && "expected 1 index"); - int64_t pos = extractOp.getPosition()[0]; + if (extractOp.hasDynamicPosition()) + // TODO: Dynamic position not supported yet. + return failure(); + + assert(extractOp.getNumIndices() == 1 && "expected 1 index"); + int64_t pos = extractOp.getStaticPosition()[0]; rewriter.setInsertionPoint(extractOp); rewriter.replaceOpWithNewOp( extractOp, extractOp.getVector(), @@ -1070,7 +1074,7 @@ struct WarpOpExtract : public OpRewritePattern { Value distributedVec = newWarpOp->getResult(newRetIndices[0]); // Extract from distributed vector.
Value newExtract = rewriter.create( - loc, distributedVec, extractOp.getPosition()); + loc, distributedVec, extractOp.getMixedPosition()); rewriter.replaceAllUsesWith(newWarpOp->getResult(operandNumber), newExtract); return success(); @@ -1096,7 +1100,7 @@ struct WarpOpExtract : public OpRewritePattern { SmallVector newDistributedShape(extractSrcType.getShape().begin(), extractSrcType.getShape().end()); for (int i = 0; i < distributedType.getRank(); ++i) - newDistributedShape[i + extractOp.getPosition().size()] = + newDistributedShape[i + extractOp.getNumIndices()] = distributedType.getDimSize(i); auto newDistributedType = VectorType::get(newDistributedShape, distributedType.getElementType()); @@ -1108,7 +1112,7 @@ struct WarpOpExtract : public OpRewritePattern { Value distributedVec = newWarpOp->getResult(newRetIndices[0]); // Extract from distributed vector. Value newExtract = rewriter.create( - loc, distributedVec, extractOp.getPosition()); + loc, distributedVec, extractOp.getMixedPosition()); rewriter.replaceAllUsesWith(newWarpOp->getResult(operandNumber), newExtract); return success(); @@ -1297,13 +1301,17 @@ struct WarpOpInsert : public OpRewritePattern { Location loc = insertOp.getLoc(); // "vector.insert %v, %v[] : ..." can be canonicalized to %v. - if (insertOp.getPosition().empty()) + if (insertOp.getNumIndices() == 0) return failure(); // Rewrite vector.insert with 1d dest to vector.insertelement. if (insertOp.getDestVectorType().getRank() == 1) { - assert(insertOp.getPosition().size() == 1 && "expected 1 index"); - int64_t pos = insertOp.getPosition()[0]; + if (insertOp.hasDynamicPosition()) + // TODO: Dynamic position not supported yet. + return failure(); + + assert(insertOp.getNumIndices() == 1 && "expected 1 index"); + int64_t pos = insertOp.getStaticPosition()[0]; rewriter.setInsertionPoint(insertOp); rewriter.replaceOpWithNewOp( insertOp, insertOp.getSource(), insertOp.getDest(), @@ -1323,7 +1331,7 @@ struct WarpOpInsert : public OpRewritePattern { Value distributedSrc = newWarpOp->getResult(newRetIndices[0]); Value distributedDest = newWarpOp->getResult(newRetIndices[1]); Value newResult = rewriter.create( - loc, distributedSrc, distributedDest, insertOp.getPosition()); + loc, distributedSrc, distributedDest, insertOp.getMixedPosition()); rewriter.replaceAllUsesWith(newWarpOp->getResult(operandNumber), newResult); return success(); @@ -1354,7 +1362,7 @@ struct WarpOpInsert : public OpRewritePattern { // Case 2: distrDestDim = 0 (dim of size 128) => distrSrcDim = -1. In that // case, one lane will insert the source vector<96xf32>. The other // lanes will not do anything. - int64_t distrSrcDim = distrDestDim - insertOp.getPosition().size(); + int64_t distrSrcDim = distrDestDim - insertOp.getNumIndices(); if (distrSrcDim >= 0) distrSrcShape[distrSrcDim] = distrDestType.getDimSize(distrDestDim); auto distrSrcType = @@ -1374,11 +1382,12 @@ struct WarpOpInsert : public OpRewritePattern { if (distrSrcDim >= 0) { // Every lane inserts a small piece. newResult = rewriter.create( - loc, distributedSrc, distributedDest, insertOp.getPosition()); + loc, distributedSrc, distributedDest, insertOp.getMixedPosition()); } else { // One lane inserts the entire source vector.
int64_t elementsPerLane = distrDestType.getDimSize(distrDestDim); - SmallVector newPos(insertOp.getPosition()); + SmallVector pos = insertOp.getMixedPosition(); + SmallVector newPos = getAsIntegers(pos); // tid of inserting lane: pos / elementsPerLane Value insertingLane = rewriter.create( loc, newPos[distrDestDim] / elementsPerLane); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp index 913c826dd9124..6bbb293fa2a6b 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/Utils/StructuredOpsUtils.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h" @@ -176,14 +177,16 @@ struct CastAwayInsertLeadingOneDim : public OpRewritePattern { // type has leading unit dims, we also trim the position array accordingly, // then (2) if source type also has leading unit dims, we need to append // zeroes to the position array accordingly. - unsigned oldPosRank = insertOp.getPosition().size(); + unsigned oldPosRank = insertOp.getNumIndices(); unsigned newPosRank = std::max(0, oldPosRank - dstDropCount); - SmallVector newPositions = - llvm::to_vector(insertOp.getPosition().take_back(newPosRank)); - newPositions.resize(newDstType.getRank() - newSrcRank, 0); + SmallVector oldPosition = insertOp.getMixedPosition(); + SmallVector newPosition = + llvm::to_vector(ArrayRef(oldPosition).take_back(newPosRank)); + newPosition.resize(newDstType.getRank() - newSrcRank, + rewriter.getI64IntegerAttr(0)); auto newInsertOp = rewriter.create( - loc, newDstType, newSrcVector, newDstVector, newPositions); + loc, newSrcVector, newDstVector, newPosition); rewriter.replaceOpWithNewOp(insertOp, oldDstType, newInsertOp); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp index f715c543eb179..603b88f11c8e0 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp @@ -707,10 +707,10 @@ class RewriteScalarExtractOfTransferRead auto xferOp = extractOp.getVector().getDefiningOp(); SmallVector newIndices(xferOp.getIndices().begin(), xferOp.getIndices().end()); - for (const auto &it : llvm::enumerate(extractOp.getPosition())) { - int64_t offset = it.value(); - int64_t idx = - newIndices.size() - extractOp.getPosition().size() + it.index(); + for (auto [i, pos] : llvm::enumerate(extractOp.getMixedPosition())) { + assert(pos.is() && "Unexpected non-constant index"); + int64_t offset = cast(pos.get()).getInt(); + int64_t idx = newIndices.size() - extractOp.getNumIndices() + i; OpFoldResult ofr = affine::makeComposedFoldedAffineApply( rewriter, extractOp.getLoc(), rewriter.getAffineSymbolExpr(0) + offset, {newIndices[idx]}); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp index b2a5aef5ee62d..b891d62ee508e 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp @@ -598,27 +598,34 @@ struct BubbleDownVectorBitCastForExtract unsigned expandRatio = castDstType.getNumElements() / castSrcType.getNumElements(); - 
uint64_t index = extractOp.getPosition()[0]; + auto getFirstIntValue = [](ArrayRef values) -> uint64_t { + assert(values[0].is() && "Unexpected non-constant index"); + return cast(values[0].get()).getInt(); + }; + + uint64_t index = getFirstIntValue(extractOp.getMixedPosition()); // Get the single scalar (as a vector) in the source value that packs the // desired scalar. E.g. extract vector<1xf32> from vector<4xf32> - VectorType oneScalarType = - VectorType::get({1}, castSrcType.getElementType()); + Location loc = extractOp.getLoc(); Value packedValue = rewriter.create( - extractOp.getLoc(), oneScalarType, castOp.getSource(), - index / expandRatio); + loc, castOp.getSource(), index / expandRatio); + Type packedVecType = VectorType::get(/*shape=*/{1}, packedValue.getType()); + Value zero = rewriter.create( + loc, packedVecType, rewriter.getZeroAttr(packedVecType)); + packedValue = rewriter.create(loc, packedValue, zero, + /*position=*/0); // Cast it to a vector with the desired scalar's type. // E.g. f32 -> vector<2xf16> VectorType packedType = VectorType::get({expandRatio}, castDstType.getElementType()); - Value castedValue = rewriter.create( - extractOp.getLoc(), packedType, packedValue); + Value castedValue = + rewriter.create(loc, packedType, packedValue); // Finally extract the desired scalar. - rewriter.replaceOpWithNewOp( - extractOp, extractOp.getType(), castedValue, index % expandRatio); - + rewriter.replaceOpWithNewOp(extractOp, castedValue, + index % expandRatio); return success(); } }; diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir index b07c4bd67be2d..41ab06f2e23b5 100644 --- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir @@ -728,6 +728,17 @@ func.func @extract_element_from_vec_3d(%arg0: vector<4x3x16xf32>) -> f32 { // ----- +func.func @extract_element_with_value_1d(%arg0: vector<16xf32>, %arg1: index) -> f32 { + %0 = vector.extract %arg0[%arg1]: vector<16xf32> + return %0 : f32 +} +// CHECK-LABEL: @extract_element_with_value_1d +// CHECK-SAME: %[[VEC:.+]]: vector<16xf32>, %[[INDEX:.+]]: index +// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 +// CHECK: llvm.extractelement %[[VEC]][%[[UC]] : i64] : vector<16xf32> + +// ----- + // CHECK-LABEL: @insert_element_0d // CHECK-SAME: %[[A:.*]]: f32, func.func @insert_element_0d(%a: f32, %b: vector) -> vector { @@ -830,6 +841,19 @@ func.func @insert_element_into_vec_3d(%arg0: f32, %arg1: vector<4x8x16xf32>) -> // ----- +func.func @insert_element_with_value_1d(%arg0: vector<16xf32>, %arg1: f32, %arg2: index) + -> vector<16xf32> { + %0 = vector.insert %arg1, %arg0[%arg2]: f32 into vector<16xf32> + return %0 : vector<16xf32> +} + +// CHECK-LABEL: @insert_element_with_value_1d +// CHECK-SAME: %[[DST:.+]]: vector<16xf32>, %[[SRC:.+]]: f32, %[[INDEX:.+]]: index +// CHECK: %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64 +// CHECK: llvm.insertelement %[[SRC]], %[[DST]][%[[UC]] : i64] : vector<16xf32> + +// ----- + func.func @vector_type_cast(%arg0: memref<8x8x8xf32>) -> memref> { %0 = vector.type_cast %arg0: memref<8x8x8xf32> to memref> return %0 : memref> diff --git a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir index f60a522cbfdba..266161d5268e9 100644 --- a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir +++ 
b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir @@ -155,8 +155,8 @@ func.func @broadcast(%arg0 : f32) -> (vector<4xf32>, vector<2xf32>) { // CHECK: spirv.CompositeExtract %[[ARG]][0 : i32] : vector<2xf32> // CHECK: spirv.CompositeExtract %[[ARG]][1 : i32] : vector<2xf32> func.func @extract(%arg0 : vector<2xf32>) -> (vector<1xf32>, f32) { - %0 = "vector.extract"(%arg0) <{position = array}> : (vector<2xf32>) -> vector<1xf32> - %1 = "vector.extract"(%arg0) <{position = array}> : (vector<2xf32>) -> f32 + %0 = "vector.extract"(%arg0) <{static_position = array}> : (vector<2xf32>) -> vector<1xf32> + %1 = "vector.extract"(%arg0) <{static_position = array}> : (vector<2xf32>) -> f32 return %0, %1: vector<1xf32>, f32 } diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir index 26772b9294935..549fe7a6a61f6 100644 --- a/mlir/test/Dialect/Vector/invalid.mlir +++ b/mlir/test/Dialect/Vector/invalid.mlir @@ -133,7 +133,7 @@ func.func @extract_position_rank_overflow(%arg0: vector<4x8x16xf32>) { func.func @extract_position_rank_overflow_generic(%arg0: vector<4x8x16xf32>) { // expected-error@+1 {{expected position attribute of rank no greater than vector rank}} - %1 = "vector.extract" (%arg0) <{position = array}> : (vector<4x8x16xf32>) -> (vector<16xf32>) + %1 = "vector.extract" (%arg0) <{static_position = array}> : (vector<4x8x16xf32>) -> (vector<16xf32>) } // ----- diff --git a/mlir/test/Dialect/Vector/ops.mlir b/mlir/test/Dialect/Vector/ops.mlir index 3a23ee14ca14f..f879cd122469a 100644 --- a/mlir/test/Dialect/Vector/ops.mlir +++ b/mlir/test/Dialect/Vector/ops.mlir @@ -206,8 +206,9 @@ func.func @extract_element(%a: vector<16xf32>) -> f32 { return %1 : f32 } -// CHECK-LABEL: @extract -func.func @extract(%arg0: vector<4x8x16xf32>) -> (vector<4x8x16xf32>, vector<8x16xf32>, vector<16xf32>, f32) { +// CHECK-LABEL: @extract_const_idx +func.func @extract_const_idx(%arg0: vector<4x8x16xf32>) + -> (vector<4x8x16xf32>, vector<8x16xf32>, vector<16xf32>, f32) { // CHECK: vector.extract {{.*}}[] : vector<4x8x16xf32> %0 = vector.extract %arg0[] : vector<4x8x16xf32> // CHECK: vector.extract {{.*}}[3] : vector<4x8x16xf32> @@ -219,6 +220,19 @@ func.func @extract(%arg0: vector<4x8x16xf32>) -> (vector<4x8x16xf32>, vector<8x1 return %0, %1, %2, %3 : vector<4x8x16xf32>, vector<8x16xf32>, vector<16xf32>, f32 } +// CHECK-LABEL: @extract_val_idx +// CHECK-SAME: %[[VEC:.+]]: vector<4x8x16xf32>, %[[IDX:.+]]: index +func.func @extract_val_idx(%arg0: vector<4x8x16xf32>, %idx: index) + -> (vector<8x16xf32>, vector<16xf32>, f32) { + // CHECK: vector.extract %[[VEC]][%[[IDX]]] : vector<4x8x16xf32> + %0 = vector.extract %arg0[%idx] : vector<4x8x16xf32> + // CHECK-NEXT: vector.extract %[[VEC]][%[[IDX]], %[[IDX]]] : vector<4x8x16xf32> + %1 = vector.extract %arg0[%idx, %idx] : vector<4x8x16xf32> + // CHECK-NEXT: vector.extract %[[VEC]][%[[IDX]], 5, %[[IDX]]] : vector<4x8x16xf32> + %2 = vector.extract %arg0[%idx, 5, %idx] : vector<4x8x16xf32> + return %0, %1, %2 : vector<8x16xf32>, vector<16xf32>, f32 +} + // CHECK-LABEL: @extract_0d func.func @extract_0d(%a: vector) -> f32 { // CHECK-NEXT: vector.extract %{{.*}}[] : vector @@ -242,8 +256,9 @@ func.func @insert_element(%a: f32, %b: vector<16xf32>) -> vector<16xf32> { return %1 : vector<16xf32> } -// CHECK-LABEL: @insert -func.func @insert(%a: f32, %b: vector<16xf32>, %c: vector<8x16xf32>, %res: vector<4x8x16xf32>) -> vector<4x8x16xf32> { +// CHECK-LABEL: @insert_const_idx +func.func @insert_const_idx(%a: f32, %b: vector<16xf32>, %c: 
vector<8x16xf32>, + %res: vector<4x8x16xf32>) -> vector<4x8x16xf32> { // CHECK: vector.insert %{{.*}}, %{{.*}}[3] : vector<8x16xf32> into vector<4x8x16xf32> %1 = vector.insert %c, %res[3] : vector<8x16xf32> into vector<4x8x16xf32> // CHECK: vector.insert %{{.*}}, %{{.*}}[3, 3] : vector<16xf32> into vector<4x8x16xf32> @@ -255,6 +270,19 @@ func.func @insert(%a: f32, %b: vector<16xf32>, %c: vector<8x16xf32>, %res: vecto return %4 : vector<4x8x16xf32> } +// CHECK-LABEL: @insert_val_idx +// CHECK-SAME: %[[A:.+]]: f32, %[[B:.+]]: vector<16xf32>, %[[C:.+]]: vector<8x16xf32>, %[[IDX:.+]]: index +func.func @insert_val_idx(%a: f32, %b: vector<16xf32>, %c: vector<8x16xf32>, + %idx: index, %res: vector<4x8x16xf32>) -> vector<4x8x16xf32> { + // CHECK: vector.insert %[[C]], %{{.*}}[%[[IDX]]] : vector<8x16xf32> into vector<4x8x16xf32> + %0 = vector.insert %c, %res[%idx] : vector<8x16xf32> into vector<4x8x16xf32> + // CHECK: vector.insert %[[B]], %{{.*}}[%[[IDX]], %[[IDX]]] : vector<16xf32> into vector<4x8x16xf32> + %1 = vector.insert %b, %res[%idx, %idx] : vector<16xf32> into vector<4x8x16xf32> + // CHECK: vector.insert %[[A]], %{{.*}}[%[[IDX]], 5, %[[IDX]]] : f32 into vector<4x8x16xf32> + %2 = vector.insert %a, %res[%idx, 5, %idx] : f32 into vector<4x8x16xf32> + return %2 : vector<4x8x16xf32> +} + // CHECK-LABEL: @insert_0d func.func @insert_0d(%a: f32, %b: vector, %c: vector<2x3xf32>) -> (vector, vector<2x3xf32>) { // CHECK-NEXT: vector.insert %{{.*}}, %{{.*}}[] : f32 into vector @@ -1007,7 +1035,7 @@ func.func @contraction_masked_scalable(%A: vector<3x4xf32>, %C: vector<3x[8]xf32>, %M : vector<3x[8]x4xi1>) -> vector<3x[8]xf32> { // CHECK: vector.mask %[[M]] { vector.contract {indexing_maps = [#{{.*}}, #{{.*}}, #{{.*}}], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind} %[[A]], %[[B]], %[[C]] : vector<3x4xf32>, vector<4x[8]xf32> into vector<3x[8]xf32> } : vector<3x[8]x4xi1> -> vector<3x[8]xf32> - %0 = vector.mask %M { vector.contract #matmat_trait %A, %B, %C : vector<3x4xf32>, vector<4x[8]xf32> into vector<3x[8]xf32> } + %0 = vector.mask %M { vector.contract #matmat_trait %A, %B, %C : vector<3x4xf32>, vector<4x[8]xf32> into vector<3x[8]xf32> } : vector<3x[8]x4xi1> -> vector<3x[8]xf32> return %0 : vector<3x[8]xf32> } diff --git a/mlir/test/Dialect/Vector/vector-transforms.mlir b/mlir/test/Dialect/Vector/vector-transforms.mlir index dfc564ca6fe48..27bbe1bb0d034 100644 --- a/mlir/test/Dialect/Vector/vector-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-transforms.mlir @@ -286,11 +286,13 @@ func.func @contraction4x4_ikj_xfer_read_tensor(%arg0 : tensor<4x2xf32>, func.func @bubble_down_bitcast_in_extract(%src: vector<4xf32>) -> (f16, f16) { %0 = vector.bitcast %src : vector<4xf32> to vector<8xf16> // CHECK: %[[EXTRACT1:.+]] = vector.extract %[[SRC]][1] : vector<4xf32> - // CHECK: %[[CAST1:.+]] = vector.bitcast %[[EXTRACT1]] : vector<1xf32> to vector<2xf16> + // CHECK: %[[INSERT1:.+]] = vector.insert %[[EXTRACT1]], %{{.+}} [0] : f32 into vector<1xf32> + // CHECK: %[[CAST1:.+]] = vector.bitcast %[[INSERT1]] : vector<1xf32> to vector<2xf16> // CHECK: %[[EXTRACT2:.+]] = vector.extract %[[CAST1]][1] : vector<2xf16> %1 = vector.extract %0[3] : vector<8xf16> // CHECK: %[[EXTRACT3:.+]] = vector.extract %[[SRC]][2] : vector<4xf32> - // CHECK: %[[CAST2:.+]] = vector.bitcast %[[EXTRACT3]] : vector<1xf32> to vector<2xf16> + // CHECK: %[[INSERT3:.+]] = vector.insert %[[EXTRACT3]], %{{.+}} [0] : f32 into vector<1xf32> + // CHECK: %[[CAST2:.+]] = vector.bitcast %[[INSERT3]] : vector<1xf32> 
to vector<2xf16> // CHECK: %[[EXTRACT4:.+]] = vector.extract %[[CAST2]][0] : vector<2xf16> %2 = vector.extract %0[4] : vector<8xf16> // CHECK: return %[[EXTRACT2]], %[[EXTRACT4]]
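Note: the hunks above migrate vector.insert and vector.extract from a purely static position attribute to a mixed static/dynamic position. The snippet below is a minimal sketch, not part of the patch, of how a downstream pattern might call the new ArrayRef<OpFoldResult> builder that this change adds to vector::InsertOp; the helper function, its name, and the concrete indices are hypothetical and only illustrate the builder's intended use.

#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Hypothetical helper: builds a vector.insert of `scalar` into `dest` at
// position [3, %dynIdx], mixing a static index (an IntegerAttr) with a
// dynamic SSA index value.
static Value insertAtMixedPosition(OpBuilder &builder, Location loc,
                                   Value scalar, Value dest, Value dynIdx) {
  SmallVector<OpFoldResult> position;
  position.push_back(builder.getI64IntegerAttr(3)); // static index
  position.push_back(dynIdx);                       // dynamic index operand
  // The builder added in this patch splits the mixed positions into the
  // dynamic-operand / static-attribute form via dispatchIndexOpFoldResults.
  return builder.create<vector::InsertOp>(loc, scalar, dest, position);
}

In printed IR this corresponds to the mixed static/dynamic assembly exercised by the @insert_val_idx and @extract_val_idx tests above, e.g. vector.insert %a, %res[%idx, 5, %idx] : f32 into vector<4x8x16xf32>.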