-
Notifications
You must be signed in to change notification settings - Fork 12.4k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[AMDGPU] Use 32-bit SGPR to save/restore SCC #68367
base: main
Are you sure you want to change the base?
[AMDGPU] Use 32-bit SGPR to save/restore SCC #68367
Conversation
@llvm/pr-subscribers-backend-amdgpu ChangesThe first of the two patches will save/restore SCC across waterfall loop. Full diff: https://github.com/llvm/llvm-project/pull/68367.diff 4 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 2216acf128bfa56..77852b2b821e04b 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -1071,7 +1071,6 @@ void SIFixSGPRCopies::lowerVGPR2SGPRCopies(MachineFunction &MF) {
}
void SIFixSGPRCopies::fixSCCCopies(MachineFunction &MF) {
- bool IsWave32 = MF.getSubtarget<GCNSubtarget>().isWave32();
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
++BI) {
MachineBasicBlock *MBB = &*BI;
@@ -1084,13 +1083,19 @@ void SIFixSGPRCopies::fixSCCCopies(MachineFunction &MF) {
Register SrcReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
if (SrcReg == AMDGPU::SCC) {
- Register SCCCopy = MRI->createVirtualRegister(
- TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID));
+ const TargetRegisterClass *DstRC =
+ TRI->getRegClassForOperandReg(*MRI, MI.getOperand(0));
+ assert((TRI->getRegSizeInBits(*DstRC) == 64 ||
+ TRI->getRegSizeInBits(*DstRC) == 32) &&
+ "Expected SCC dst to be 64 or 32 bits");
+ bool IsDst32Bit = TRI->getRegSizeInBits(*DstRC) == 32;
+ Register SCCCopy =
+ IsDst32Bit ? MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass)
+ : MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned Opcode =
+ IsDst32Bit ? AMDGPU::S_CSELECT_B32 : AMDGPU::S_CSELECT_B64;
I = BuildMI(*MI.getParent(), std::next(MachineBasicBlock::iterator(MI)),
- MI.getDebugLoc(),
- TII->get(IsWave32 ? AMDGPU::S_CSELECT_B32
- : AMDGPU::S_CSELECT_B64),
- SCCCopy)
+ MI.getDebugLoc(), TII->get(Opcode), SCCCopy)
.addImm(-1)
.addImm(0);
I = BuildMI(*MI.getParent(), std::next(I), I->getDebugLoc(),
@@ -1100,9 +1105,17 @@ void SIFixSGPRCopies::fixSCCCopies(MachineFunction &MF) {
continue;
}
if (DstReg == AMDGPU::SCC) {
- unsigned Opcode = IsWave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
- Register Exec = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
- Register Tmp = MRI->createVirtualRegister(TRI->getBoolRC());
+ const TargetRegisterClass *SrcRC =
+ TRI->getRegClassForOperandReg(*MRI, MI.getOperand(1));
+ assert((TRI->getRegSizeInBits(*SrcRC) == 64 ||
+ TRI->getRegSizeInBits(*SrcRC) == 32) &&
+ "Expected SCC src to be 64 or 32 bits");
+ bool IsSrc32Bit = TRI->getRegSizeInBits(*SrcRC) == 32;
+ unsigned Opcode = IsSrc32Bit ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
+ Register Exec = IsSrc32Bit ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
+ Register Tmp =
+ IsSrc32Bit ? MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass)
+ : MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
I = BuildMI(*MI.getParent(), std::next(MachineBasicBlock::iterator(MI)),
MI.getDebugLoc(), TII->get(Opcode))
.addReg(Tmp, getDefRegState(true))
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 792f4695d288b5f..f50321e6538d02a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -5094,6 +5094,39 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
"Unexpected scalar opcode without corresponding vector one!");
}
+bool SIInstrInfo::isSCCDefinedBefore(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Before) const {
+
+ for (MachineBasicBlock::iterator I = Before, B = MBB.begin(); I != B; --I) {
+ MachineInstr &MI = *I;
+ if (!MI.hasImplicitDef())
+ continue;
+ for (MachineOperand &Op : MI.implicit_operands()) {
+ if (Op.getReg() == AMDGPU::SCC && Op.isDef() && !Op.isDead())
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SIInstrInfo::isSCCUsedAfter(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator After) const {
+ for (MachineBasicBlock::iterator I = After, E = MBB.end(); I != E; ++I) {
+ MachineInstr &MI = *I;
+ if (MI.hasRegisterImplicitUseOperand(AMDGPU::SCC))
+ return true;
+ }
+ return false;
+}
+
+bool SIInstrInfo::isSCCDefinedAndUsed(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Before,
+ MachineBasicBlock::iterator After) const {
+ if (isSCCDefinedBefore(MBB, Before) && isSCCUsedAfter(MBB, After))
+ return true;
+ return false;
+}
+
void SIInstrInfo::insertScratchExecCopy(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -6014,6 +6047,15 @@ loadMBUFScalarOperandsFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
+  // Save SCC, since the waterfall loop below may overwrite it.
+ Register SaveSCCReg;
+ bool SCCDefined = false;
+ if ((SCCDefined = TII.isSCCDefinedAndUsed(MBB, Begin, End))) {
+ SaveSCCReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ BuildMI(MBB, Begin, DL, TII.get(AMDGPU::COPY), SaveSCCReg)
+ .addReg(AMDGPU::SCC);
+ }
+
Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
// Save the EXEC mask
@@ -6069,8 +6111,14 @@ loadMBUFScalarOperandsFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
emitLoadScalarOpsFromVGPRLoop(TII, MRI, MBB, *LoopBB, *BodyBB, DL, ScalarOps);
- // Restore the EXEC mask
MachineBasicBlock::iterator First = RemainderBB->begin();
+ // Restore SCC
+ if (SCCDefined) {
+ BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
+ .addReg(SaveSCCReg);
+ }
+
+ // Restore the EXEC mask
BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
return BodyBB;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index a4f59fc3513d646..3a347017f1c85a1 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -960,6 +960,19 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
unsigned getVALUOp(const MachineInstr &MI) const;
+  /// Return true if SCC is defined (and not dead)
+  /// between \p Before and the beginning of \p MBB
+ bool isSCCDefinedBefore(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Before) const;
+
+ /// Return true if SCC is used from After to end of MBB
+ bool isSCCUsedAfter(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator After) const;
+
+ bool isSCCDefinedAndUsed(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Before,
+ MachineBasicBlock::iterator After) const;
+
void insertScratchExecCopy(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, Register Reg, bool IsSCCLive,
diff --git a/llvm/test/CodeGen/AMDGPU/waterfall_kills_scc.ll b/llvm/test/CodeGen/AMDGPU/waterfall_kills_scc.ll
new file mode 100644
index 000000000000000..9fe9ab1fc889cac
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/waterfall_kills_scc.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX906 %s
+declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32 immarg) #0
+declare void @llvm.amdgcn.raw.buffer.store.f32(float, <4 x i32>, i32, i32, i32 immarg) #1
+
+; Check that the compiler doesn't crash with an "undefined physical register" error;
+; bb.0 sets SCC bit in s_cmp_eq_u32 s0, 1
+; bb.1 overrides it
+; bb.2 uses the value from bb.0
+; Preserve SCC across bb.1 with s_cselect_b32 s5, -1, 0 -> s_and_b32 s0, s5, exec_lo
+; Otherwise, we will see following error.
+;*** Bad machine code: Using an undefined physical register ***
+;- function: foo
+;- basic block: %bb.3 (0x53198c0)
+;- instruction: %33.sub1:sgpr_128 = S_CSELECT_B32 1072693248, 0, implicit $scc
+;- operand 3: implicit $scc
+
+
+define amdgpu_kernel void @foo(i1 %cmp1) {
+; GFX906-LABEL: foo:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX906-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX906-NEXT: s_mov_b32 s10, -1
+; GFX906-NEXT: s_mov_b32 s11, 0xe00000
+; GFX906-NEXT: s_add_u32 s8, s8, s3
+; GFX906-NEXT: s_addc_u32 s9, s9, 0
+; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0
+; GFX906-NEXT: buffer_load_dword v4, off, s[8:11], 0 offset:4
+; GFX906-NEXT: buffer_load_dword v5, off, s[8:11], 0 offset:8
+; GFX906-NEXT: buffer_load_dword v6, off, s[8:11], 0 offset:12
+; GFX906-NEXT: s_load_dword s4, s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x1c
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: s_bitcmp1_b32 s4, 0
+; GFX906-NEXT: s_mul_i32 s0, s2, s3
+; GFX906-NEXT: v_mul_u32_u24_e32 v1, s3, v1
+; GFX906-NEXT: v_mad_u32_u24 v0, s0, v0, v1
+; GFX906-NEXT: v_add_lshl_u32 v2, v0, v2, 4
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_mov_b32 s4, 0
+; GFX906-NEXT: v_mov_b32_e32 v1, v0
+; GFX906-NEXT: s_cselect_b32 s5, -1, 0
+; GFX906-NEXT: s_mov_b64 s[2:3], exec
+; GFX906-NEXT: ds_write_b64 v2, v[0:1]
+; GFX906-NEXT: .LBB0_1: ; =>This Inner Loop Header: Depth=1
+; GFX906-NEXT: s_waitcnt vmcnt(3)
+; GFX906-NEXT: v_readfirstlane_b32 s0, v3
+; GFX906-NEXT: s_waitcnt vmcnt(2)
+; GFX906-NEXT: v_readfirstlane_b32 s1, v4
+; GFX906-NEXT: v_cmp_eq_u64_e32 vcc, s[0:1], v[3:4]
+; GFX906-NEXT: s_waitcnt vmcnt(1)
+; GFX906-NEXT: v_readfirstlane_b32 s0, v5
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_readfirstlane_b32 s1, v6
+; GFX906-NEXT: v_cmp_eq_u64_e64 s[0:1], s[0:1], v[5:6]
+; GFX906-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
+; GFX906-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6
+; GFX906-NEXT: s_xor_b64 exec, exec, s[0:1]
+; GFX906-NEXT: s_cbranch_execnz .LBB0_1
+; GFX906-NEXT: ; %bb.2:
+; GFX906-NEXT: s_and_b32 s0, s5, exec_lo
+; GFX906-NEXT: s_mov_b64 exec, s[2:3]
+; GFX906-NEXT: s_cselect_b32 s5, 0x3ff00000, 0
+; GFX906-NEXT: v_cvt_f32_f64_e32 v0, s[4:5]
+; GFX906-NEXT: s_mov_b32 s5, s4
+; GFX906-NEXT: s_mov_b32 s6, s4
+; GFX906-NEXT: s_mov_b32 s7, s4
+; GFX906-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX906-NEXT: s_endpgm
+entry:
+ %wbr = alloca <4 x i32>, align 16, addrspace(5)
+ store ptr null, ptr addrspace(5) %wbr, align 16
+ %wbr_1 = load <4 x i32>, ptr addrspace(5) null, align 16
+ %call1 = tail call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %wbr_1, i32 0, i32 0, i32 0)
+ %0 = fpext float %call1 to double
+ %sel1 = select i1 %cmp1, double 1.000000e+00, double 0.000000e+00
+ %sel2 = select i1 %cmp1, double %0, double 0.000000e+00
+ %mul = fmul double %sel2, 0.000000e+00
+ %fptruncate = fptrunc double %sel1 to float
+ tail call void @llvm.amdgcn.raw.buffer.store.f32(float %fptruncate, <4 x i32> zeroinitializer, i32 0, i32 0, i32 0)
+ ret void
+}
+
+attributes #0 = { nocallback nofree nosync nounwind willreturn memory(read) }
+attributes #1 = { nocallback nofree nosync nounwind willreturn memory(write) }
|
If I understand correctly, the intent is to make the lowering of the copies from/to SCC be driven by the size of the dst/src register, instead of the wave size, to reduce register pressure when the second 32 bit reg is not needed? If so I think the patch needs a better title/description, I initially thought it was always using a 32 bit register. Something like |
That is the intent of this patch. Will change the title |
SCC is a bit in the 32-bit STATUS register. Unless the COPY's source or destination is 64-bit, there is no need to use a 64-bit register. Otherwise, it will just tie up a register unnecessarily, which may cause register pressure in later passes.
fbfe706
to
144ee78
Compare
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
SIFixSGPRCopies is supposed to fix "illegal" VGPR->SGPR copies. Apparently it also fixes VGPR<->SCC copies. But I do not think it should touch SGPR<->SCC copies. Those are perfectly legal and will be handled by SIInstrInfo::copyPhysReg.
So a better fix would be to teach fixSCCCopies not to touch SGPR<->SCC copies.
I can open another JIRA ticket to fix SIFixSGPRCopies. Not only does SIFixSGPRCopies needs to be rewritten for SCC Copies, there is also |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The pre-existing code here is simply not sensible. It doesn't make sense to unconditionally replace SCC copies with a similar looking vector operation. For the purposes of the local change, the minimal fix is to just directly emit the s_cselect_b32. We should also untangle the current handling, because this really should just emit a copy
Can this be closed since #68363 landed? |
The first of the two patches will save/restore SCC across the waterfall loop.
The second patch will use only 32-bit SGPR to save/restore SCC.