diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index a508f9e5914c8..e31da61d15085 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -8919,16 +8919,8 @@ unsigned SIInstrInfo::getLiveRangeSplitOpcode(Register SrcReg,
 }
 
 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
-  // We need to handle instructions which may be inserted during register
-  // allocation to handle the prolog. The initial prolog instruction may have
-  // been separated from the start of the block by spills and copies inserted
-  // needed by the prolog.
-  uint16_t Opc = MI.getOpcode();
-
-  // FIXME: Copies inserted in the block prolog for live-range split should also
-  // be included.
-  return (isSpillOpcode(Opc) || (!MI.isTerminator() && Opc != AMDGPU::COPY &&
-                                 MI.modifiesRegister(AMDGPU::EXEC, &RI)));
+  return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
+         MI.modifiesRegister(AMDGPU::EXEC, &RI);
 }
 
 MachineInstrBuilder
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/image-waterfall-loop-O0.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/image-waterfall-loop-O0.ll
index 10cbc56cc5fbe..b19230c2e876c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/image-waterfall-loop-O0.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/image-waterfall-loop-O0.ll
@@ -144,6 +144,8 @@ define <4 x float> @waterfall_loop(<8 x i32> %vgpr_srd) {
 ; CHECK-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
 ; CHECK-NEXT:    s_mov_b32 exec_lo, s21
 ; CHECK-NEXT:  ; %bb.2: ; in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
 ; CHECK-NEXT:    s_or_saveexec_b32 s21, -1
 ; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
 ; CHECK-NEXT:    s_mov_b32 exec_lo, s21
@@ -161,9 +163,6 @@ define <4 x float> @waterfall_loop(<8 x i32> %vgpr_srd) {
 ; CHECK-NEXT:    v_readlane_b32 s17, v2, 1
 ; CHECK-NEXT:    v_readlane_b32 s18, v2, 2
 ; CHECK-NEXT:    v_readlane_b32 s19, v2, 3
-; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
-; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    image_sample v0, v[0:1], s[8:15], s[16:19] dmask:0x1 dim:SQ_RSRC_IMG_2D
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
diff --git a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
index 4915ea4029860..790fdb331bbd0 100644
--- a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
+++ b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
@@ -149,8 +149,8 @@ define { i32, half } @call_split_type_used_outside_block_struct() #0 {
 ; GCN-NEXT:    s_addc_u32 s17, s17, func_struct@rel32@hi+12
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; GCN-NEXT:    v_readlane_b32 s30, v40, 0
-; GCN-NEXT:    v_readlane_b32 s31, v40, 1
 ; GCN-NEXT:    v_mov_b32_e32 v1, v4
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
 ; GCN-NEXT:    v_readlane_b32 s4, v40, 2
 ; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/AMDGPU/ra-inserted-scalar-instructions.mir b/llvm/test/CodeGen/AMDGPU/ra-inserted-scalar-instructions.mir
deleted file mode 100644
index dca9ffad7e800..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/ra-inserted-scalar-instructions.mir
+++ /dev/null
@@ -1,603 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
-# RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 -run-pass=greedy --stress-regalloc=6 --verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
-
-# The spills/copies during RA for scalar register block LiveIns should be inserted at the beginning of the block.
-# The COPY inserted in bb.9 during liverange split should precede the SPILL that was inserted earlier in the flow.
-
----
-name: test_kernel
-tracksRegLiveness: true
-registers:
-  - { id: 0, class: vgpr_32 }
-  - { id: 1, class: vreg_64 }
-  - { id: 2, class: sgpr_32 }
-  - { id: 3, class: sreg_32 }
-  - { id: 4, class: sreg_32 }
-  - { id: 5, class: sreg_32 }
-  - { id: 6, class: sgpr_256 }
-  - { id: 7, class: sgpr_256 }
-  - { id: 8, class: sgpr_256 }
-  - { id: 9, class: sgpr_256 }
-  - { id: 10, class: sgpr_256 }
-  - { id: 11, class: sreg_32_xm0_xexec }
-  - { id: 12, class: sreg_32_xm0_xexec }
-  - { id: 13, class: sgpr_64 }
-  - { id: 14, class: sreg_32_xm0_xexec }
-  - { id: 15, class: sreg_32 }
-  - { id: 16, class: sreg_32 }
-  - { id: 17, class: sreg_32 }
-  - { id: 18, class: sreg_32 }
-  - { id: 19, class: sreg_32 }
-  - { id: 20, class: sreg_32 }
-  - { id: 21, class: sreg_32 }
-  - { id: 22, class: sreg_32 }
-  - { id: 23, class: sreg_32 }
-  - { id: 24, class: sreg_32 }
-  - { id: 25, class: sreg_32 }
-  - { id: 26, class: sreg_32 }
-  - { id: 27, class: sreg_32 }
-  - { id: 28, class: sreg_32 }
-  - { id: 29, class: sreg_32 }
-  - { id: 30, class: sreg_32 }
-  - { id: 31, class: sreg_32 }
-  - { id: 32, class: sreg_32 }
-  - { id: 33, class: sreg_32 }
-  - { id: 34, class: sreg_32 }
-  - { id: 35, class: sreg_32 }
-  - { id: 36, class: sreg_32 }
-  - { id: 37, class: sreg_32 }
-  - { id: 38, class: sreg_32 }
-  - { id: 39, class: sreg_32 }
-  - { id: 40, class: sreg_32 }
-  - { id: 41, class: sreg_32 }
-  - { id: 42, class: sreg_32 }
-  - { id: 43, class: sreg_32 }
-  - { id: 44, class: sreg_32 }
-  - { id: 45, class: sreg_32 }
-  - { id: 46, class: sreg_32 }
-  - { id: 47, class: sreg_32 }
-  - { id: 48, class: sreg_32 }
-  - { id: 49, class: sreg_32 }
-  - { id: 50, class: sreg_32 }
-  - { id: 51, class: sreg_32 }
-  - { id: 52, class: sreg_32 }
-  - { id: 53, class: sreg_32 }
-  - { id: 54, class: sreg_32 }
-  - { id: 55, class: sreg_32 }
-  - { id: 56, class: sreg_32 }
-  - { id: 57, class: sreg_32 }
-  - { id: 58, class: sreg_32 }
-  - { id: 59, class: sreg_32 }
-  - { id: 60, class: sreg_32 }
-  - { id: 61, class: sreg_32 }
-  - { id: 62, class: sreg_32 }
-  - { id: 63, class: sreg_32 }
-  - { id: 64, class: sreg_32 }
-  - { id: 65, class: sreg_32 }
-  - { id: 66, class: sreg_32 }
-  - { id: 67, class: sreg_32 }
-  - { id: 68, class: sreg_32 }
-  - { id: 69, class: sreg_32 }
-  - { id: 70, class: sreg_32 }
-  - { id: 71, class: sreg_32 }
-  - { id: 72, class: sreg_32 }
-  - { id: 73, class: sreg_32 }
-  - { id: 74, class: sreg_32 }
-  - { id: 75, class: sreg_32 }
-  - { id: 76, class: sreg_32 }
-  - { id: 77, class: sreg_32 }
-  - { id: 78, class: sreg_32 }
-  - { id: 79, class: sreg_32 }
-  - { id: 80, class: sreg_32 }
-  - { id: 81, class: sreg_32 }
-  - { id: 82, class: sreg_32 }
-  - { id: 83, class: sreg_32 }
-  - { id: 84, class: sreg_32 }
-  - { id: 85, class: sreg_32 }
-  - { id: 86, class: sreg_32 }
-  - { id: 87, class: sreg_32 }
-  - { id: 88, class: sreg_32 }
-  - { id: 89, class: sreg_32 }
-  - { id: 90, class: sreg_32 }
-  - { id: 91, class: sreg_32 }
-  - { id: 92, class: sreg_32 }
- - { id: 93, class: sgpr_64 } - - { id: 94, class: sreg_32_xm0_xexec } - - { id: 95, class: sgpr_32 } - - { id: 96, class: sreg_32_xm0_xexec } - - { id: 97, class: sreg_64 } - - { id: 98, class: sreg_32_xm0_xexec } - - { id: 99, class: sreg_32_xm0_xexec } - - { id: 100, class: sreg_64 } - - { id: 101, class: sgpr_128 } - - { id: 102, class: sreg_64_xexec } - - { id: 103, class: sgpr_32 } - - { id: 104, class: sgpr_64 } - - { id: 105, class: sgpr_64 } - - { id: 106, class: sgpr_64 } - - { id: 107, class: sreg_32, preferred-register: '$vcc' } - - { id: 108, class: sreg_32, preferred-register: '$vcc' } - - { id: 109, class: sgpr_32 } - - { id: 110, class: sgpr_256 } - - { id: 111, class: sgpr_512 } - - { id: 112, class: sgpr_512 } - - { id: 113, class: sgpr_256 } - - { id: 114, class: sgpr_256 } - - { id: 115, class: sgpr_256 } - - { id: 116, class: sreg_32_xm0_xexec } -machineFunctionInfo: - maxKernArgAlign: 1 - isEntryFunction: true - stackPtrOffsetReg: '$sgpr32' - sgprForEXECCopy: '$sgpr105' -body: | - ; GCN-LABEL: name: test_kernel - ; GCN: bb.0: - ; GCN-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; GCN-NEXT: liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: dead [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF - ; GCN-NEXT: dead undef [[DEF1:%[0-9]+]].sub1:vreg_64 = IMPLICIT_DEF - ; GCN-NEXT: SI_SPILL_S32_SAVE $sgpr1, %stack.15, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.15, addrspace 5) - ; GCN-NEXT: undef [[COPY:%[0-9]+]].sub1:sgpr_64 = COPY $sgpr0 - ; GCN-NEXT: SI_SPILL_S64_SAVE [[COPY]], %stack.2, implicit $exec, implicit $sgpr32 :: (store (s64) into %stack.2, align 4, addrspace 5) - ; GCN-NEXT: undef [[V_READFIRSTLANE_B32_:%[0-9]+]].sub0:sgpr_64 = V_READFIRSTLANE_B32 undef [[DEF]], implicit $exec - ; GCN-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]].sub1:sgpr_64 = V_READFIRSTLANE_B32 undef [[DEF]], implicit $exec - ; GCN-NEXT: undef [[V_READFIRSTLANE_B32_1:%[0-9]+]].sub0:sgpr_64 = V_READFIRSTLANE_B32 undef [[DEF]], implicit $exec - ; GCN-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]].sub1:sgpr_64 = IMPLICIT_DEF - ; GCN-NEXT: SI_SPILL_S64_SAVE [[V_READFIRSTLANE_B32_1]], %stack.19, implicit $exec, implicit $sgpr32 :: (store (s64) into %stack.19, align 4, addrspace 5) - ; GCN-NEXT: undef [[V_READFIRSTLANE_B32_2:%[0-9]+]].sub0:sgpr_64 = V_READFIRSTLANE_B32 undef [[DEF]], implicit $exec - ; GCN-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]].sub1:sgpr_64 = V_READFIRSTLANE_B32 undef [[DEF]], implicit $exec - ; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 0 - ; GCN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 0 - ; GCN-NEXT: SI_SPILL_S32_SAVE [[S_MOV_B32_1]], %stack.17, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.17, addrspace 5) - ; GCN-NEXT: S_CBRANCH_SCC1 %bb.2, implicit undef $scc - ; GCN-NEXT: S_BRANCH %bb.1 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.1: - ; GCN-NEXT: successors: %bb.2(0x80000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: [[DEF2:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: KILL [[DEF2]] - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.2: - ; GCN-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: [[S_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM undef [[V_READFIRSTLANE_B32_2]], 132, 0 :: ("amdgpu-noclobber" load (s128), align 8, addrspace 1) - ; GCN-NEXT: SI_SPILL_S128_SAVE [[S_LOAD_DWORDX4_IMM]], %stack.14, implicit $exec, implicit $sgpr32 :: (store (s128) into 
%stack.14, align 4, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef [[V_READFIRSTLANE_B32_2]], 188, 0 :: ("amdgpu-noclobber" load (s256), align 8, addrspace 1) - ; GCN-NEXT: S_CBRANCH_SCC1 %bb.4, implicit undef $scc - ; GCN-NEXT: S_BRANCH %bb.3 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.3: - ; GCN-NEXT: successors: %bb.4(0x80000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 -1 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.4: - ; GCN-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: SI_SPILL_S32_SAVE [[S_MOV_B32_]], %stack.9, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.9, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM undef [[V_READFIRSTLANE_B32_2]], 120, 0 :: ("amdgpu-noclobber" load (s64), align 16, addrspace 1) - ; GCN-NEXT: SI_SPILL_S64_SAVE [[S_LOAD_DWORDX2_IMM]], %stack.18, implicit $exec, implicit $sgpr32 :: (store (s64) into %stack.18, align 4, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORDX8_IMM1:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef [[V_READFIRSTLANE_B32_2]], 352, 0 :: ("amdgpu-noclobber" load (s256), align 16, addrspace 1) - ; GCN-NEXT: SI_SPILL_S256_SAVE [[S_LOAD_DWORDX8_IMM1]], %stack.10, implicit $exec, implicit $sgpr32 :: (store (s256) into %stack.10, align 4, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %97:sreg_64, 0, 0 - ; GCN-NEXT: SI_SPILL_S32_SAVE [[S_LOAD_DWORD_IMM]], %stack.11, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.11, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORDX8_IMM2:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef [[V_READFIRSTLANE_B32_2]], 652, 0 :: ("amdgpu-noclobber" load (s256), align 8, addrspace 1) - ; GCN-NEXT: SI_SPILL_S256_SAVE [[S_LOAD_DWORDX8_IMM2]], %stack.6, implicit $exec, implicit $sgpr32 :: (store (s256) into %stack.6, align 4, addrspace 5) - ; GCN-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0 - ; GCN-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_MOV_B64_]], 0, 0 :: ("amdgpu-noclobber" load (s32), align 8, addrspace 1) - ; GCN-NEXT: SI_SPILL_S32_SAVE [[S_LOAD_DWORD_IMM1]], %stack.3, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.3, addrspace 5) - ; GCN-NEXT: SI_SPILL_S64_SAVE [[V_READFIRSTLANE_B32_2]], %stack.1, implicit $exec, implicit $sgpr32 :: (store (s64) into %stack.1, align 4, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORDX8_IMM3:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM [[V_READFIRSTLANE_B32_2]], 688, 0 :: ("amdgpu-noclobber" load (s256), align 16, addrspace 1) - ; GCN-NEXT: SI_SPILL_S256_SAVE [[S_LOAD_DWORDX8_IMM3]], %stack.4, implicit $exec, implicit $sgpr32 :: (store (s256) into %stack.4, align 4, addrspace 5) - ; GCN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 - ; GCN-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 0 - ; GCN-NEXT: S_CBRANCH_SCC1 %bb.6, implicit undef $scc - ; GCN-NEXT: S_BRANCH %bb.5 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.5: - ; GCN-NEXT: successors: %bb.6(0x80000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 -1 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.6: - ; GCN-NEXT: successors: %bb.7(0x40000000), %bb.10(0x40000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: SI_SPILL_S32_SAVE [[S_MOV_B32_3]], %stack.5, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.5, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sgpr_32 = S_LOAD_DWORD_IMM undef %123:sgpr_64, 0, 
0 :: ("amdgpu-noclobber" load (s32), align 16, addrspace 1) - ; GCN-NEXT: [[S_LOAD_DWORDX8_IMM4:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %124:sgpr_64, 152, 0 :: ("amdgpu-noclobber" load (s256), align 4, addrspace 1) - ; GCN-NEXT: SI_SPILL_S256_SAVE [[S_LOAD_DWORDX8_IMM4]], %stack.20, implicit $exec, implicit $sgpr32 :: (store (s256) into %stack.20, align 4, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORDX8_IMM5:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %125:sgpr_64, 220, 0 :: ("amdgpu-noclobber" load (s256), align 4, addrspace 1) - ; GCN-NEXT: SI_SPILL_S256_SAVE [[S_LOAD_DWORDX8_IMM5]], %stack.16, implicit $exec, implicit $sgpr32 :: (store (s256) into %stack.16, align 4, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORDX8_IMM6:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %126:sgpr_64, 384, 0 :: ("amdgpu-noclobber" load (s256), align 4, addrspace 1) - ; GCN-NEXT: SI_SPILL_S256_SAVE [[S_LOAD_DWORDX8_IMM6]], %stack.13, implicit $exec, implicit $sgpr32 :: (store (s256) into %stack.13, align 4, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORDX16_IMM:%[0-9]+]]:sgpr_512 = S_LOAD_DWORDX16_IMM undef %127:sgpr_64, 440, 0 :: ("amdgpu-noclobber" load (s512), align 8, addrspace 1) - ; GCN-NEXT: [[S_LOAD_DWORDX16_IMM1:%[0-9]+]]:sgpr_512 = S_LOAD_DWORDX16_IMM undef %128:sgpr_64, 584, 0 :: ("amdgpu-noclobber" load (s512), align 16, addrspace 1) - ; GCN-NEXT: SI_SPILL_S512_SAVE [[S_LOAD_DWORDX16_IMM1]], %stack.12, implicit $exec, implicit $sgpr32 :: (store (s512) into %stack.12, align 4, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORDX8_IMM7:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM [[V_READFIRSTLANE_B32_]], 156, 0 :: ("amdgpu-noclobber" load (s256), align 8, addrspace 1) - ; GCN-NEXT: SI_SPILL_S256_SAVE [[S_LOAD_DWORDX8_IMM7]], %stack.8, implicit $exec, implicit $sgpr32 :: (store (s256) into %stack.8, align 4, addrspace 5) - ; GCN-NEXT: [[SI_SPILL_S64_RESTORE:%[0-9]+]]:sgpr_64 = SI_SPILL_S64_RESTORE %stack.19, implicit $exec, implicit $sgpr32 :: (load (s64) from %stack.19, align 4, addrspace 5) - ; GCN-NEXT: [[S_LOAD_DWORD_IMM3:%[0-9]+]]:sgpr_32 = S_LOAD_DWORD_IMM [[SI_SPILL_S64_RESTORE]], 0, 0 :: ("amdgpu-noclobber" load (s32), align 8, addrspace 1) - ; GCN-NEXT: SI_SPILL_S32_SAVE [[S_LOAD_DWORD_IMM3]], %stack.7, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.7, addrspace 5) - ; GCN-NEXT: SI_SPILL_S64_SAVE [[V_READFIRSTLANE_B32_]], %stack.0, implicit $exec, implicit $sgpr32 :: (store (s64) into %stack.0, align 4, addrspace 5) - ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY [[V_READFIRSTLANE_B32_]] - ; GCN-NEXT: dead [[S_LOAD_DWORD_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY1]], 0, 0 :: ("amdgpu-noclobber" load (s32), addrspace 1) - ; GCN-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0 - ; GCN-NEXT: [[S_LOAD_DWORD_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_MOV_B64_1]], 0, 0 :: ("amdgpu-noclobber" load (s32), addrspace 1) - ; GCN-NEXT: [[SI_SPILL_S64_RESTORE1:%[0-9]+]]:sgpr_64 = SI_SPILL_S64_RESTORE %stack.2, implicit $exec, implicit $sgpr32 :: (load (s64) from %stack.2, align 4, addrspace 5) - ; GCN-NEXT: undef [[COPY2:%[0-9]+]].sub1:sgpr_64 = COPY [[SI_SPILL_S64_RESTORE1]].sub1 - ; GCN-NEXT: [[COPY2:%[0-9]+]].sub0:sgpr_64 = S_MOV_B32 1 - ; GCN-NEXT: S_CBRANCH_SCC1 %bb.10, implicit undef $scc - ; GCN-NEXT: S_BRANCH %bb.7 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.7: - ; GCN-NEXT: successors: %bb.8(0x40000000), %bb.9(0x40000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: SI_SPILL_S64_SAVE [[COPY2]], %stack.2, implicit $exec, implicit $sgpr32 :: (store (s64) into %stack.2, align 4, addrspace 5) 
- ; GCN-NEXT: undef [[V_READFIRSTLANE_B32_3:%[0-9]+]].sub0:sgpr_64 = V_READFIRSTLANE_B32 undef [[DEF1]].sub0, implicit $exec - ; GCN-NEXT: dead [[V_READFIRSTLANE_B32_3:%[0-9]+]].sub1:sgpr_64 = V_READFIRSTLANE_B32 undef [[DEF1]].sub1, implicit $exec - ; GCN-NEXT: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 - ; GCN-NEXT: $vcc = COPY [[DEF3]] - ; GCN-NEXT: S_CBRANCH_VCCNZ %bb.9, implicit $vcc - ; GCN-NEXT: S_BRANCH %bb.8 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.8: - ; GCN-NEXT: successors: %bb.9(0x80000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sgpr_32 = S_MOV_B32 -1 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.9: - ; GCN-NEXT: successors: %bb.10(0x80000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = COPY [[S_MOV_B32_4]] - ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_64 = SI_SPILL_S64_RESTORE %stack.2, implicit $exec, implicit $sgpr32 :: (load (s64) from %stack.2, align 4, addrspace 5) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.10: - ; GCN-NEXT: successors: %bb.11(0x40000000), %bb.12(0x40000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[S_LOAD_DWORD_IMM2]], 0, implicit $mode, implicit $exec - ; GCN-NEXT: [[SI_SPILL_S32_RESTORE:%[0-9]+]]:sreg_32_xm0_xexec = SI_SPILL_S32_RESTORE %stack.17, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.17, addrspace 5) - ; GCN-NEXT: dead [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[V_CMP_GT_F32_e64_]], [[SI_SPILL_S32_RESTORE]], implicit-def dead $scc - ; GCN-NEXT: [[SI_SPILL_S32_RESTORE1:%[0-9]+]]:sgpr_32 = SI_SPILL_S32_RESTORE %stack.15, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.15, addrspace 5) - ; GCN-NEXT: S_CMP_EQ_U32 [[SI_SPILL_S32_RESTORE1]], 0, implicit-def $scc - ; GCN-NEXT: dead [[DEF4:%[0-9]+]]:sreg_32_xm0_xexec = IMPLICIT_DEF - ; GCN-NEXT: [[SI_SPILL_S64_RESTORE2:%[0-9]+]]:sreg_64_xexec = SI_SPILL_S64_RESTORE %stack.18, implicit $exec, implicit $sgpr32 :: (load (s64) from %stack.18, align 4, addrspace 5) - ; GCN-NEXT: S_CMP_EQ_U32 [[SI_SPILL_S64_RESTORE2]].sub1, 0, implicit-def $scc - ; GCN-NEXT: dead [[DEF5:%[0-9]+]]:sreg_32_xm0_xexec = IMPLICIT_DEF - ; GCN-NEXT: [[SI_SPILL_S256_RESTORE:%[0-9]+]]:sgpr_256 = SI_SPILL_S256_RESTORE %stack.20, implicit $exec, implicit $sgpr32 :: (load (s256) from %stack.20, align 4, addrspace 5) - ; GCN-NEXT: undef [[COPY3:%[0-9]+]].sub0:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE]].sub0 { - ; GCN-NEXT: internal [[COPY3]].sub2:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE]].sub2 - ; GCN-NEXT: internal [[COPY3]].sub4:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE]].sub4 - ; GCN-NEXT: internal [[COPY3]].sub7:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE]].sub7 - ; GCN-NEXT: } - ; GCN-NEXT: dead [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY3]].sub7, [[S_LOAD_DWORD_IMM5]], implicit-def dead $scc - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY3]].sub0, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_2:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY3]].sub2, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_3:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY3]].sub4, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[S_OR_B32_1:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_LOAD_DWORDX8_IMM]].sub0, undef [[S_OR_B32_]], implicit-def dead $scc - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_4:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[S_LOAD_DWORDX8_IMM]].sub1, 0, implicit $mode, 
implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_5:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[S_LOAD_DWORDX8_IMM]].sub2, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_6:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[S_LOAD_DWORDX8_IMM]].sub3, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_7:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[S_LOAD_DWORDX8_IMM]].sub4, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_8:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[S_LOAD_DWORDX8_IMM]].sub5, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_9:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[S_LOAD_DWORDX8_IMM]].sub6, 0, implicit $mode, implicit $exec - ; GCN-NEXT: [[SI_SPILL_S128_RESTORE:%[0-9]+]]:sgpr_128 = SI_SPILL_S128_RESTORE %stack.14, implicit $exec, implicit $sgpr32 :: (load (s128) from %stack.14, align 4, addrspace 5) - ; GCN-NEXT: undef [[COPY4:%[0-9]+]].sub0_sub1_sub2:sgpr_128 = COPY [[SI_SPILL_S128_RESTORE]].sub0_sub1_sub2 - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_10:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY4]].sub0, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_11:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY4]].sub1, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_12:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY4]].sub2, 0, implicit $mode, implicit $exec - ; GCN-NEXT: [[DEF6:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[DEF5]], [[DEF6]], implicit-def dead $scc - ; GCN-NEXT: dead [[DEF7:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[DEF8:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[DEF9:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[DEF10:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[DEF11:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[S_AND_B32_2:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[DEF11]], undef [[DEF11]], implicit-def dead $scc - ; GCN-NEXT: [[SI_SPILL_S256_RESTORE1:%[0-9]+]]:sgpr_256 = SI_SPILL_S256_RESTORE %stack.16, implicit $exec, implicit $sgpr32 :: (load (s256) from %stack.16, align 4, addrspace 5) - ; GCN-NEXT: undef [[COPY5:%[0-9]+]].sub0:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE1]].sub0 { - ; GCN-NEXT: internal [[COPY5]].sub2:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE1]].sub2 - ; GCN-NEXT: internal [[COPY5]].sub5:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE1]].sub5 - ; GCN-NEXT: internal [[COPY5]].sub7:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE1]].sub7 - ; GCN-NEXT: } - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_13:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY5]].sub0, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[S_AND_B32_3:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[V_CMP_GT_F32_e64_8]], undef [[V_CMP_GT_F32_e64_9]], implicit-def dead $scc - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_14:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY5]].sub2, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[S_OR_B32_2:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY5]].sub5, [[COPY5]].sub7, implicit-def dead $scc - ; GCN-NEXT: [[SI_SPILL_S256_RESTORE2:%[0-9]+]]:sgpr_256 = SI_SPILL_S256_RESTORE %stack.10, implicit $exec, implicit $sgpr32 :: (load (s256) from %stack.10, align 4, addrspace 5) - ; GCN-NEXT: undef [[COPY6:%[0-9]+]].lo16_hi16_sub1_lo16_sub1_hi16_sub2_lo16_sub2_hi16_sub3_lo16_sub3_hi16_sub4_lo16_sub4_hi16_sub5_lo16_sub5_hi16_sub6_lo16_sub6_hi16:sgpr_256 = COPY 
[[SI_SPILL_S256_RESTORE2]].lo16_hi16_sub1_lo16_sub1_hi16_sub2_lo16_sub2_hi16_sub3_lo16_sub3_hi16_sub4_lo16_sub4_hi16_sub5_lo16_sub5_hi16_sub6_lo16_sub6_hi16 - ; GCN-NEXT: dead [[S_OR_B32_3:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY6]].sub0, [[COPY6]].sub1, implicit-def dead $scc - ; GCN-NEXT: dead [[S_OR_B32_4:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY6]].sub2, undef [[S_OR_B32_3]], implicit-def dead $scc - ; GCN-NEXT: [[SI_SPILL_S32_RESTORE2:%[0-9]+]]:sreg_32_xm0_xexec = SI_SPILL_S32_RESTORE %stack.9, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.9, addrspace 5) - ; GCN-NEXT: dead [[S_AND_B32_4:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[S_OR_B32_3]], [[SI_SPILL_S32_RESTORE2]], implicit-def dead $scc - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_15:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY6]].sub3, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_16:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY6]].sub4, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_17:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY6]].sub5, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_18:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY6]].sub6, 0, implicit $mode, implicit $exec - ; GCN-NEXT: [[SI_SPILL_S32_RESTORE3:%[0-9]+]]:sreg_32_xm0_xexec = SI_SPILL_S32_RESTORE %stack.11, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.11, addrspace 5) - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_19:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[SI_SPILL_S32_RESTORE3]], 0, implicit $mode, implicit $exec - ; GCN-NEXT: [[SI_SPILL_S256_RESTORE3:%[0-9]+]]:sgpr_256 = SI_SPILL_S256_RESTORE %stack.13, implicit $exec, implicit $sgpr32 :: (load (s256) from %stack.13, align 4, addrspace 5) - ; GCN-NEXT: undef [[COPY7:%[0-9]+]].sub0:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE3]].sub0 { - ; GCN-NEXT: internal [[COPY7]].sub2:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE3]].sub2 - ; GCN-NEXT: internal [[COPY7]].sub4:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE3]].sub4 - ; GCN-NEXT: internal [[COPY7]].sub7:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE3]].sub7 - ; GCN-NEXT: } - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_20:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY7]].sub0, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_21:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY7]].sub2, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[DEF12:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_22:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY7]].sub4, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[S_AND_B32_5:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[DEF12]], undef [[V_CMP_GT_F32_e64_20]], implicit-def dead $scc - ; GCN-NEXT: S_CMP_EQ_U32 [[COPY7]].sub7, 0, implicit-def $scc - ; GCN-NEXT: undef [[COPY8:%[0-9]+]].sub0:sgpr_512 = COPY [[S_LOAD_DWORDX16_IMM]].sub0 { - ; GCN-NEXT: internal [[COPY8]].sub2:sgpr_512 = COPY [[S_LOAD_DWORDX16_IMM]].sub2 - ; GCN-NEXT: internal [[COPY8]].sub4:sgpr_512 = COPY [[S_LOAD_DWORDX16_IMM]].sub4 - ; GCN-NEXT: internal [[COPY8]].sub6:sgpr_512 = COPY [[S_LOAD_DWORDX16_IMM]].sub6 - ; GCN-NEXT: internal [[COPY8]].sub9:sgpr_512 = COPY [[S_LOAD_DWORDX16_IMM]].sub9 - ; GCN-NEXT: internal [[COPY8]].sub10:sgpr_512 = COPY [[S_LOAD_DWORDX16_IMM]].sub10 - ; GCN-NEXT: internal [[COPY8]].sub13:sgpr_512 = COPY [[S_LOAD_DWORDX16_IMM]].sub13 - ; GCN-NEXT: internal [[COPY8]].sub14:sgpr_512 = COPY [[S_LOAD_DWORDX16_IMM]].sub14 - ; GCN-NEXT: } - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_23:%[0-9]+]]:sreg_32 = 
V_CMP_GT_F32_e64 0, 0, 0, [[COPY8]].sub0, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_24:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY8]].sub2, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_25:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY8]].sub4, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_26:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY8]].sub6, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[S_AND_B32_6:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[V_CMP_GT_F32_e64_23]], undef [[V_CMP_GT_F32_e64_23]], implicit-def dead $scc - ; GCN-NEXT: dead [[S_OR_B32_5:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY8]].sub10, [[COPY8]].sub9, implicit-def dead $scc - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_27:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY8]].sub13, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_28:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY8]].sub14, 0, implicit $mode, implicit $exec - ; GCN-NEXT: [[SI_SPILL_S512_RESTORE:%[0-9]+]]:sgpr_512 = SI_SPILL_S512_RESTORE %stack.12, implicit $exec, implicit $sgpr32 :: (load (s512) from %stack.12, align 4, addrspace 5) - ; GCN-NEXT: undef [[COPY9:%[0-9]+]].sub1:sgpr_512 = COPY [[SI_SPILL_S512_RESTORE]].sub1 { - ; GCN-NEXT: internal [[COPY9]].sub5:sgpr_512 = COPY [[SI_SPILL_S512_RESTORE]].sub5 - ; GCN-NEXT: internal [[COPY9]].sub6:sgpr_512 = COPY [[SI_SPILL_S512_RESTORE]].sub6 - ; GCN-NEXT: internal [[COPY9]].sub9:sgpr_512 = COPY [[SI_SPILL_S512_RESTORE]].sub9 - ; GCN-NEXT: internal [[COPY9]].sub10:sgpr_512 = COPY [[SI_SPILL_S512_RESTORE]].sub10 - ; GCN-NEXT: internal [[COPY9]].sub12:sgpr_512 = COPY [[SI_SPILL_S512_RESTORE]].sub12 - ; GCN-NEXT: internal [[COPY9]].sub15:sgpr_512 = COPY [[SI_SPILL_S512_RESTORE]].sub15 - ; GCN-NEXT: } - ; GCN-NEXT: S_CMP_EQ_U32 [[COPY9]].sub1, 0, implicit-def $scc - ; GCN-NEXT: dead [[DEF13:%[0-9]+]]:sreg_32_xm0_xexec = IMPLICIT_DEF - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_29:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY9]].sub5, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_30:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY9]].sub6, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[DEF14:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_31:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY9]].sub9, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_32:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY9]].sub10, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[DEF15:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[S_AND_B32_7:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[DEF15]], undef [[DEF14]], implicit-def dead $scc - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_33:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY9]].sub12, 0, implicit $mode, implicit $exec - ; GCN-NEXT: [[SI_SPILL_S256_RESTORE4:%[0-9]+]]:sgpr_256 = SI_SPILL_S256_RESTORE %stack.6, implicit $exec, implicit $sgpr32 :: (load (s256) from %stack.6, align 4, addrspace 5) - ; GCN-NEXT: undef [[COPY10:%[0-9]+]].lo16_hi16_sub1_lo16_sub1_hi16_sub2_lo16_sub2_hi16_sub3_lo16_sub3_hi16_sub4_lo16_sub4_hi16_sub5_lo16_sub5_hi16_sub6_lo16_sub6_hi16:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE4]].lo16_hi16_sub1_lo16_sub1_hi16_sub2_lo16_sub2_hi16_sub3_lo16_sub3_hi16_sub4_lo16_sub4_hi16_sub5_lo16_sub5_hi16_sub6_lo16_sub6_hi16 - ; GCN-NEXT: dead [[S_OR_B32_6:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY10]].sub0, [[COPY9]].sub15, implicit-def dead $scc - ; GCN-NEXT: dead 
[[DEF16:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_34:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY10]].sub1, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_35:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY10]].sub2, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[DEF17:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_36:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY10]].sub3, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_37:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY10]].sub4, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[DEF18:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_38:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY10]].sub5, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_39:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY10]].sub6, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[S_AND_B32_8:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[DEF18]], undef [[DEF17]], implicit-def dead $scc - ; GCN-NEXT: [[SI_SPILL_S256_RESTORE5:%[0-9]+]]:sgpr_256 = SI_SPILL_S256_RESTORE %stack.4, implicit $exec, implicit $sgpr32 :: (load (s256) from %stack.4, align 4, addrspace 5) - ; GCN-NEXT: undef [[COPY11:%[0-9]+]].sub0_sub1_sub2_sub3_sub4_sub5:sgpr_256 = COPY [[SI_SPILL_S256_RESTORE5]].sub0_sub1_sub2_sub3_sub4_sub5 - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_40:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY11]].sub0, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_41:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY11]].sub1, 0, implicit $mode, implicit $exec - ; GCN-NEXT: [[SI_SPILL_S32_RESTORE4:%[0-9]+]]:sreg_32_xm0_xexec = SI_SPILL_S32_RESTORE %stack.3, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.3, addrspace 5) - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_42:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[SI_SPILL_S32_RESTORE4]], 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_43:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY11]].sub2, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_44:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[COPY11]].sub3, 0, implicit $mode, implicit $exec - ; GCN-NEXT: dead [[S_OR_B32_7:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY11]].sub4, [[COPY11]].sub5, implicit-def dead $scc - ; GCN-NEXT: S_CMP_EQ_U32 [[SI_SPILL_S32_RESTORE4]], 0, implicit-def $scc - ; GCN-NEXT: [[SI_SPILL_S32_RESTORE5:%[0-9]+]]:sreg_32_xm0_xexec = SI_SPILL_S32_RESTORE %stack.5, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.5, addrspace 5) - ; GCN-NEXT: dead [[S_AND_B32_9:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[S_OR_B32_7]], [[SI_SPILL_S32_RESTORE5]], implicit-def dead $scc - ; GCN-NEXT: dead [[S_LOAD_DWORD_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY2]], 0, 0 :: ("amdgpu-noclobber" load (s32), align 8, addrspace 1) - ; GCN-NEXT: [[SI_SPILL_S256_RESTORE6:%[0-9]+]]:sgpr_256 = SI_SPILL_S256_RESTORE %stack.8, implicit $exec, implicit $sgpr32 :: (load (s256) from %stack.8, align 4, addrspace 5) - ; GCN-NEXT: S_CMP_EQ_U32 [[SI_SPILL_S256_RESTORE6]].sub7, 0, implicit-def $scc - ; GCN-NEXT: [[SI_SPILL_S32_RESTORE6:%[0-9]+]]:sgpr_32 = SI_SPILL_S32_RESTORE %stack.7, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.7, addrspace 5) - ; GCN-NEXT: dead [[V_CMP_GT_F32_e64_45:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, [[SI_SPILL_S32_RESTORE6]], 0, implicit $mode, implicit $exec - ; GCN-NEXT: 
[[DEF19:%[0-9]+]]:sreg_32 = IMPLICIT_DEF - ; GCN-NEXT: dead [[S_AND_B32_10:%[0-9]+]]:sreg_32 = S_AND_B32 [[DEF19]], undef [[S_LOAD_DWORD_IMM6]], implicit-def dead $scc - ; GCN-NEXT: dead [[S_AND_B32_11:%[0-9]+]]:sreg_32 = S_AND_B32 undef [[S_AND_B32_10]], [[S_MOV_B32_2]], implicit-def dead $scc - ; GCN-NEXT: $vcc = COPY undef [[S_AND_B32_11]] - ; GCN-NEXT: S_CBRANCH_VCCNZ %bb.12, implicit $vcc - ; GCN-NEXT: S_BRANCH %bb.11 - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.11: - ; GCN-NEXT: successors: %bb.12(0x80000000) - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: {{ $}} - ; GCN-NEXT: bb.12: - ; GCN-NEXT: [[SI_SPILL_S64_RESTORE3:%[0-9]+]]:sgpr_64 = SI_SPILL_S64_RESTORE %stack.1, implicit $exec, implicit $sgpr32 :: (load (s64) from %stack.1, align 4, addrspace 5) - ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR undef [[DEF]], undef [[DEF]], [[SI_SPILL_S64_RESTORE3]], 0, 0, implicit $exec :: (store (s32), addrspace 1) - ; GCN-NEXT: [[SI_SPILL_S64_RESTORE4:%[0-9]+]]:sgpr_64 = SI_SPILL_S64_RESTORE %stack.0, implicit $exec, implicit $sgpr32 :: (load (s64) from %stack.0, align 4, addrspace 5) - ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR undef [[DEF]], undef [[DEF]], [[SI_SPILL_S64_RESTORE4]], 0, 0, implicit $exec :: (store (s32), addrspace 1) - ; GCN-NEXT: S_ENDPGM 0 - bb.0: - successors: %bb.1, %bb.2 - liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13 - - %0:vgpr_32 = IMPLICIT_DEF - undef %1.sub1:vreg_64 = IMPLICIT_DEF - %109:sgpr_32 = COPY $sgpr1 - undef %93.sub1:sgpr_64 = COPY $sgpr0 - undef %106.sub0:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec - %106.sub1:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec - undef %105.sub0:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec - %105.sub1:sgpr_64 = IMPLICIT_DEF - undef %104.sub0:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec - %104.sub1:sgpr_64 = V_READFIRSTLANE_B32 undef %0, implicit $exec - %4:sreg_32 = S_MOV_B32 0 - %5:sreg_32 = S_MOV_B32 0 - S_CBRANCH_SCC1 %bb.2, implicit undef $scc - S_BRANCH %bb.1 - - bb.1: - %5:sreg_32 = IMPLICIT_DEF - - bb.2: - successors: %bb.3, %bb.4 - - %101:sgpr_128 = S_LOAD_DWORDX4_IMM undef %104, 132, 0 :: ("amdgpu-noclobber" load (s128), align 8, addrspace 1) - %10:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 188, 0 :: ("amdgpu-noclobber" load (s256), align 8, addrspace 1) - %100:sreg_64 = S_MOV_B64 0 - S_CBRANCH_SCC1 %bb.4, implicit undef $scc - S_BRANCH %bb.3 - - bb.3: - %4:sreg_32 = S_MOV_B32 -1 - - bb.4: - successors: %bb.5, %bb.6 - - %102:sreg_64_xexec = S_LOAD_DWORDX2_IMM undef %104, 120, 0 :: ("amdgpu-noclobber" load (s64), align 16, addrspace 1) - %8:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 352, 0 :: ("amdgpu-noclobber" load (s256), align 16, addrspace 1) - %98:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %97:sreg_64, 0, 0 - %7:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 652, 0 :: ("amdgpu-noclobber" load (s256), align 8, addrspace 1) - %96:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %100, 0, 0 :: ("amdgpu-noclobber" load (s32), align 8, addrspace 1) - %6:sgpr_256 = S_LOAD_DWORDX8_IMM %104, 688, 0 :: ("amdgpu-noclobber" load (s256), align 16, addrspace 1) - %2:sgpr_32 = S_MOV_B32 0 - %3:sreg_32 = S_MOV_B32 0 - S_CBRANCH_SCC1 %bb.6, implicit undef $scc - S_BRANCH %bb.5 - - bb.5: - %3:sreg_32 = S_MOV_B32 -1 - - bb.6: - successors: %bb.7, %bb.10 - - %103:sgpr_32 = S_LOAD_DWORD_IMM undef %104, 0, 0 :: ("amdgpu-noclobber" load (s32), align 16, addrspace 1) - %115:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 152, 0 :: ("amdgpu-noclobber" load (s256), 
align 4, addrspace 1) - %114:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 220, 0 :: ("amdgpu-noclobber" load (s256), align 4, addrspace 1) - %113:sgpr_256 = S_LOAD_DWORDX8_IMM undef %104, 384, 0 :: ("amdgpu-noclobber" load (s256), align 4, addrspace 1) - %112:sgpr_512 = S_LOAD_DWORDX16_IMM undef %104, 440, 0 :: ("amdgpu-noclobber" load (s512), align 8, addrspace 1) - %111:sgpr_512 = S_LOAD_DWORDX16_IMM undef %104, 584, 0 :: ("amdgpu-noclobber" load (s512), align 16, addrspace 1) - %110:sgpr_256 = S_LOAD_DWORDX8_IMM %106, 156, 0 :: ("amdgpu-noclobber" load (s256), align 8, addrspace 1) - %95:sgpr_32 = S_LOAD_DWORD_IMM %105, 0, 0 :: ("amdgpu-noclobber" load (s32), align 8, addrspace 1) - %94:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %106, 0, 0 :: ("amdgpu-noclobber" load (s32), addrspace 1) - %99:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %100, 0, 0 :: ("amdgpu-noclobber" load (s32), addrspace 1) - %107:sreg_32 = IMPLICIT_DEF - %108:sreg_32 = IMPLICIT_DEF - %93.sub0:sgpr_64 = S_MOV_B32 1 - S_CBRANCH_SCC1 %bb.10, implicit undef $scc - S_BRANCH %bb.7 - - bb.7: - successors: %bb.8, %bb.9 - - undef %13.sub0:sgpr_64 = V_READFIRSTLANE_B32 undef %1.sub0, implicit $exec - %13.sub1:sgpr_64 = V_READFIRSTLANE_B32 undef %1.sub1, implicit $exec - %92:sreg_32 = IMPLICIT_DEF - %2:sgpr_32 = S_MOV_B32 0 - $vcc = COPY %92 - S_CBRANCH_VCCNZ %bb.9, implicit $vcc - S_BRANCH %bb.8 - - bb.8: - %2:sgpr_32 = S_MOV_B32 -1 - - bb.9: - - bb.10: - successors: %bb.11, %bb.12 - - %91:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %103, 0, implicit $mode, implicit $exec - %90:sreg_32 = S_AND_B32 undef %91, %5, implicit-def dead $scc - S_CMP_EQ_U32 %109, 0, implicit-def $scc - %12:sreg_32_xm0_xexec = IMPLICIT_DEF - S_CMP_EQ_U32 %102.sub1, 0, implicit-def $scc - %11:sreg_32_xm0_xexec = IMPLICIT_DEF - %77:sreg_32 = S_OR_B32 %115.sub7, %99, implicit-def dead $scc - %82:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %115.sub0, 0, implicit $mode, implicit $exec - %79:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %115.sub2, 0, implicit $mode, implicit $exec - %78:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %115.sub4, 0, implicit $mode, implicit $exec - %76:sreg_32 = S_OR_B32 %10.sub0, undef %77, implicit-def dead $scc - %75:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub1, 0, implicit $mode, implicit $exec - %74:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub2, 0, implicit $mode, implicit $exec - %73:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub3, 0, implicit $mode, implicit $exec - %72:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub4, 0, implicit $mode, implicit $exec - %70:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub5, 0, implicit $mode, implicit $exec - %69:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %10.sub6, 0, implicit $mode, implicit $exec - %87:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %101.sub0, 0, implicit $mode, implicit $exec - %86:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %101.sub1, 0, implicit $mode, implicit $exec - %83:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %101.sub2, 0, implicit $mode, implicit $exec - %89:sreg_32 = S_AND_B32 undef %11, %108, implicit-def dead $scc - %88:sreg_32 = IMPLICIT_DEF - %85:sreg_32 = IMPLICIT_DEF - %84:sreg_32 = IMPLICIT_DEF - %81:sreg_32 = IMPLICIT_DEF - %80:sreg_32 = IMPLICIT_DEF - %71:sreg_32 = S_AND_B32 undef %80, undef %80, implicit-def dead $scc - %67:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %114.sub0, 0, implicit $mode, implicit $exec - %68:sreg_32 = S_AND_B32 undef %70, undef %69, implicit-def dead $scc - %66:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %114.sub2, 0, implicit $mode, implicit $exec - %65:sreg_32 = S_OR_B32 %114.sub5, %114.sub7, implicit-def dead $scc - %63:sreg_32 = 
S_OR_B32 %8.sub0, %8.sub1, implicit-def dead $scc - %62:sreg_32 = S_OR_B32 %8.sub2, undef %63, implicit-def dead $scc - %64:sreg_32 = S_AND_B32 undef %63, %4, implicit-def dead $scc - %61:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %8.sub3, 0, implicit $mode, implicit $exec - %60:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %8.sub4, 0, implicit $mode, implicit $exec - %59:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %8.sub5, 0, implicit $mode, implicit $exec - %58:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %8.sub6, 0, implicit $mode, implicit $exec - %57:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %98, 0, implicit $mode, implicit $exec - %56:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %113.sub0, 0, implicit $mode, implicit $exec - %53:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %113.sub2, 0, implicit $mode, implicit $exec - %55:sreg_32 = IMPLICIT_DEF - %52:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %113.sub4, 0, implicit $mode, implicit $exec - %54:sreg_32 = S_AND_B32 undef %55, undef %56, implicit-def dead $scc - S_CMP_EQ_U32 %113.sub7, 0, implicit-def $scc - %51:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub0, 0, implicit $mode, implicit $exec - %49:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub2, 0, implicit $mode, implicit $exec - %48:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub4, 0, implicit $mode, implicit $exec - %47:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub6, 0, implicit $mode, implicit $exec - %50:sreg_32 = S_AND_B32 undef %51, undef %51, implicit-def dead $scc - %46:sreg_32 = S_OR_B32 %112.sub10, %112.sub9, implicit-def dead $scc - %45:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub13, 0, implicit $mode, implicit $exec - %44:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %112.sub14, 0, implicit $mode, implicit $exec - S_CMP_EQ_U32 %111.sub1, 0, implicit-def $scc - %116:sreg_32_xm0_xexec = IMPLICIT_DEF - %42:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub5, 0, implicit $mode, implicit $exec - %41:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub6, 0, implicit $mode, implicit $exec - %43:sreg_32 = IMPLICIT_DEF - %38:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub9, 0, implicit $mode, implicit $exec - %37:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub10, 0, implicit $mode, implicit $exec - %40:sreg_32 = IMPLICIT_DEF - %39:sreg_32 = S_AND_B32 undef %40, undef %43, implicit-def dead $scc - %36:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %111.sub12, 0, implicit $mode, implicit $exec - %34:sreg_32 = S_OR_B32 %7.sub0, %111.sub15, implicit-def dead $scc - %35:sreg_32 = IMPLICIT_DEF - %32:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub1, 0, implicit $mode, implicit $exec - %31:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub2, 0, implicit $mode, implicit $exec - %33:sreg_32 = IMPLICIT_DEF - %28:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub3, 0, implicit $mode, implicit $exec - %27:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub4, 0, implicit $mode, implicit $exec - %30:sreg_32 = IMPLICIT_DEF - %26:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub5, 0, implicit $mode, implicit $exec - %25:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %7.sub6, 0, implicit $mode, implicit $exec - %29:sreg_32 = S_AND_B32 undef %30, undef %33, implicit-def dead $scc - %23:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %6.sub0, 0, implicit $mode, implicit $exec - %22:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %6.sub1, 0, implicit $mode, implicit $exec - %24:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %96, 0, implicit $mode, implicit $exec - %21:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %6.sub2, 0, implicit $mode, implicit $exec - %20:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %6.sub3, 0, implicit $mode, implicit $exec - %19:sreg_32 = S_OR_B32 %6.sub4, %6.sub5, implicit-def dead $scc 
- S_CMP_EQ_U32 %96, 0, implicit-def $scc - %18:sreg_32 = S_AND_B32 undef %19, %3, implicit-def dead $scc - %14:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %93, 0, 0 :: ("amdgpu-noclobber" load (s32), align 8, addrspace 1) - S_CMP_EQ_U32 %110.sub7, 0, implicit-def $scc - %16:sreg_32 = V_CMP_GT_F32_e64 0, 0, 0, %95, 0, implicit $mode, implicit $exec - %17:sreg_32 = S_AND_B32 %107, undef %14, implicit-def dead $scc - %15:sreg_32 = S_AND_B32 undef %17, %2, implicit-def dead $scc - $vcc = COPY undef %15 - S_CBRANCH_VCCNZ %bb.12, implicit $vcc - S_BRANCH %bb.11 - - bb.11: - - bb.12: - GLOBAL_STORE_DWORD_SADDR undef %0, undef %0, %104, 0, 0, implicit $exec :: (store (s32), addrspace 1) - GLOBAL_STORE_DWORD_SADDR undef %0, undef %0, %106, 0, 0, implicit $exec :: (store (s32), addrspace 1) - S_ENDPGM 0 -...