author     Fangrui Song <i@maskray.me>   2022-11-18 01:23:12 +0000
committer  Fangrui Song <i@maskray.me>   2022-11-18 01:23:12 +0000
commit     6b852ffa9973015fb5deb6d859d980692387dcc7 (patch)
tree       2211c6d152c025669a9650cb3cd94460da94983f
parent     dcb71b5e1d1311c41f409c8dab26e04b084875be (diff)
[Sink] Process basic blocks with a single successor
This condition seems unnecessary.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D93511
-rw-r--r--  llvm/lib/Transforms/Scalar/Sink.cpp | 3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/andorbitset.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll | 50
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/operand-folding.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll | 558
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll | 7
-rw-r--r--  llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll | 104
-rw-r--r--  llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll | 99
-rw-r--r--  llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll | 3
-rw-r--r--  llvm/test/Transforms/Sink/single-succ.ll | 2
12 files changed, 435 insertions, 425 deletions
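
For context, the check removed from Sink.cpp below made ProcessBlock return early for any block whose terminator has fewer than two successors, which blocked sinking out of straight-line blocks even when an instruction's only use sits behind a later branch. A minimal, hypothetical IR sketch of the situation the change enables (function and value names are illustrative, not taken from the new single-succ.ll test):

define void @sink_from_single_succ(i32 %x, i1 %c, i32* %p) {
entry:
  %sq = mul i32 %x, %x       ; only used in %use
  br label %bb               ; single successor: previously made ProcessBlock bail
bb:
  br i1 %c, label %use, label %exit
use:
  store i32 %sq, i32* %p
  br label %exit
exit:
  ret void
}

With the early exit gone, the pass may first sink %sq from %entry into %bb and, on a later iteration, from %bb into %use, so the multiply no longer executes on the %exit path.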
diff --git a/llvm/lib/Transforms/Scalar/Sink.cpp b/llvm/lib/Transforms/Scalar/Sink.cpp
index dad45c4..8b99f73 100644
--- a/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -174,9 +174,6 @@ static bool SinkInstruction(Instruction *Inst,
static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI,
AAResults &AA) {
- // Can't sink anything out of a block that has less than two successors.
- if (BB.getTerminator()->getNumSuccessors() <= 1) return false;
-
// Don't bother sinking code out of unreachable blocks. In addition to being
// unprofitable, it can also lead to infinite looping, because in an
// unreachable loop there may be nowhere to stop.
diff --git a/llvm/test/CodeGen/AMDGPU/andorbitset.ll b/llvm/test/CodeGen/AMDGPU/andorbitset.ll
index 621ab51..196e813 100644
--- a/llvm/test/CodeGen/AMDGPU/andorbitset.ll
+++ b/llvm/test/CodeGen/AMDGPU/andorbitset.ll
@@ -48,13 +48,17 @@ define amdgpu_kernel void @s_set_midbit(i32 addrspace(1)* %out, i32 %in) {
ret void
}
+@gv = external addrspace(1) global i32
+
; Make sure there's no verifier error with an undef source.
; SI-LABEL: {{^}}bitset_verifier_error:
-; SI: s_bitset0_b32 s{{[0-9]+}}, 31
+; SI-NOT: %bb.1:
+; SI: s_bitset0_b32 s{{[0-9]+}}, 31
define void @bitset_verifier_error() local_unnamed_addr #0 {
bb:
%i = call float @llvm.fabs.f32(float undef) #0
%i1 = bitcast float %i to i32
+ store i32 %i1, i32 addrspace(1)* @gv
br label %bb2
bb2:
diff --git a/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll b/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll
index 276847c..a427bc6 100644
--- a/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll
+++ b/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll
@@ -120,25 +120,27 @@ define protected amdgpu_kernel void @nand(i32 addrspace(1)* %p, %S addrspace(1)*
; CHECK: ; %bb.0:
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b64 s[4:5], 0
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
+; CHECK-NEXT: v_mov_b32_e32 v0, s6
; CHECK-NEXT: .LBB5_1: ; %atomicrmw.start
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_mov_b32_e32 v3, v1
-; CHECK-NEXT: v_not_b32_e32 v1, v3
-; CHECK-NEXT: v_or_b32_e32 v2, -2, v1
-; CHECK-NEXT: global_atomic_cmpswap v1, v0, v[2:3], s[0:1] glc
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
+; CHECK-NEXT: v_not_b32_e32 v0, v3
+; CHECK-NEXT: v_or_b32_e32 v2, -2, v0
+; CHECK-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_cbranch_execnz .LBB5_1
; CHECK-NEXT: ; %bb.2: ; %atomicrmw.end
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
-; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v1, 12, s[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v0, 12, v[2:3]
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
@@ -330,26 +332,28 @@ define protected amdgpu_kernel void @fadd(float addrspace(1)* %p, %S addrspace(1
; CHECK: ; %bb.0:
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b64 s[4:5], 0
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
+; CHECK-NEXT: v_mov_b32_e32 v0, s6
; CHECK-NEXT: .LBB14_1: ; %atomicrmw.start
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_mov_b32_e32 v3, v1
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
; CHECK-NEXT: v_add_f32_e32 v2, 1.0, v3
-; CHECK-NEXT: global_atomic_cmpswap v1, v0, v[2:3], s[0:1] glc
+; CHECK-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_cbranch_execnz .LBB14_1
; CHECK-NEXT: ; %bb.2: ; %atomicrmw.end
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
-; CHECK-NEXT: v_cvt_u32_f32_e32 v0, v1
+; CHECK-NEXT: v_cvt_u32_f32_e32 v2, v0
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v2, 12, v[0:1]
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
-; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v0, 12, s[2:3]
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
%f32 = atomicrmw fadd float addrspace(1)* %p, float 1.0 monotonic
@@ -365,26 +369,28 @@ define protected amdgpu_kernel void @fsub(float addrspace(1)* %p, %S addrspace(1
; CHECK: ; %bb.0:
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b64 s[4:5], 0
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
+; CHECK-NEXT: v_mov_b32_e32 v0, s6
; CHECK-NEXT: .LBB15_1: ; %atomicrmw.start
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_mov_b32_e32 v3, v1
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
; CHECK-NEXT: v_add_f32_e32 v2, -1.0, v3
-; CHECK-NEXT: global_atomic_cmpswap v1, v0, v[2:3], s[0:1] glc
+; CHECK-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v1, v3
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_cbranch_execnz .LBB15_1
; CHECK-NEXT: ; %bb.2: ; %atomicrmw.end
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
-; CHECK-NEXT: v_cvt_u32_f32_e32 v0, v1
+; CHECK-NEXT: v_cvt_u32_f32_e32 v2, v0
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v2, 12, v[0:1]
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
-; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v0, 12, s[2:3]
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
%f32 = atomicrmw fsub float addrspace(1)* %p, float 1.0 monotonic
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll
index 69ee096..49bb769 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll
@@ -205,6 +205,8 @@ main_body:
ret void
}
+@gv = external addrspace(1) global i32
+
;GCN-LABEL: {{^}}s_buffer_load_index_across_bb:
;GCN-NOT: s_waitcnt;
;GCN: v_or_b32
@@ -212,6 +214,7 @@ main_body:
define amdgpu_ps void @s_buffer_load_index_across_bb(<4 x i32> inreg %desc, i32 %index) {
main_body:
%tmp = shl i32 %index, 4
+ store i32 %tmp, i32 addrspace(1)* @gv
br label %bb1
bb1: ; preds = %main_body
@@ -224,10 +227,7 @@ bb1: ; preds = %main_body
;GCN-LABEL: {{^}}s_buffer_load_index_across_bb_merged:
;GCN-NOT: s_waitcnt;
-;GCN: v_or_b32
-;GCN: v_or_b32
-;GCN: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
-;GCN: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
+;GCN: buffer_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen offset:8
define amdgpu_ps void @s_buffer_load_index_across_bb_merged(<4 x i32> inreg %desc, i32 %index) {
main_body:
%tmp = shl i32 %index, 4
diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
index 776d4ed..151cb2c 100644
--- a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
@@ -19,7 +19,6 @@
define amdgpu_kernel void @local_stack_offset_uses_sp(i64 addrspace(1)* %out) {
; MUBUF-LABEL: local_stack_offset_uses_sp:
; MUBUF: ; %bb.0: ; %entry
-; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
; MUBUF-NEXT: s_add_u32 s0, s0, s9
; MUBUF-NEXT: v_mov_b32_e32 v1, 0x3000
; MUBUF-NEXT: s_addc_u32 s1, s1, 0
@@ -48,17 +47,17 @@ define amdgpu_kernel void @local_stack_offset_uses_sp(i64 addrspace(1)* %out) {
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen offset:4 glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; MUBUF-NEXT: v_mov_b32_e32 v6, 0
; MUBUF-NEXT: v_add_co_u32_e32 v0, vcc, v2, v4
; MUBUF-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v5, vcc
-; MUBUF-NEXT: v_mov_b32_e32 v2, 0
; MUBUF-NEXT: s_waitcnt lgkmcnt(0)
-; MUBUF-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
+; MUBUF-NEXT: global_store_dwordx2 v6, v[0:1], s[4:5]
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: s_endpgm
;
; FLATSCR-LABEL: local_stack_offset_uses_sp:
; FLATSCR: ; %bb.0: ; %entry
-; FLATSCR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; FLATSCR-NEXT: s_add_u32 flat_scratch_lo, s2, s5
; FLATSCR-NEXT: s_addc_u32 flat_scratch_hi, s3, 0
; FLATSCR-NEXT: v_mov_b32_e32 v0, 0
@@ -82,11 +81,12 @@ define amdgpu_kernel void @local_stack_offset_uses_sp(i64 addrspace(1)* %out) {
; FLATSCR-NEXT: s_movk_i32 s2, 0x3000
; FLATSCR-NEXT: scratch_load_dwordx2 v[2:3], off, s2 offset:64 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
+; FLATSCR-NEXT: v_mov_b32_e32 v4, 0
; FLATSCR-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
; FLATSCR-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
-; FLATSCR-NEXT: v_mov_b32_e32 v2, 0
; FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
-; FLATSCR-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; FLATSCR-NEXT: global_store_dwordx2 v4, v[0:1], s[0:1]
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_endpgm
entry:
@@ -203,7 +203,6 @@ entry:
define amdgpu_kernel void @local_stack_offset_uses_sp_flat(<3 x i64> addrspace(1)* %out) {
; MUBUF-LABEL: local_stack_offset_uses_sp_flat:
; MUBUF: ; %bb.0: ; %entry
-; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
; MUBUF-NEXT: s_add_u32 s0, s0, s9
; MUBUF-NEXT: s_addc_u32 s1, s1, 0
; MUBUF-NEXT: v_mov_b32_e32 v0, 0x4000
@@ -239,9 +238,10 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(<3 x i64> addrspace(1
; MUBUF-NEXT: v_mov_b32_e32 v13, 0x4000
; MUBUF-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
-; MUBUF-NEXT: v_mov_b32_e32 v12, 0
+; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
; MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: v_mov_b32_e32 v12, 0
; MUBUF-NEXT: buffer_load_dword v8, v13, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: v_mov_b32_e32 v13, 0x4000
@@ -274,7 +274,6 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(<3 x i64> addrspace(1
;
; FLATSCR-LABEL: local_stack_offset_uses_sp_flat:
; FLATSCR: ; %bb.0: ; %entry
-; FLATSCR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; FLATSCR-NEXT: s_add_u32 flat_scratch_lo, s2, s5
; FLATSCR-NEXT: s_addc_u32 flat_scratch_hi, s3, 0
; FLATSCR-NEXT: v_mov_b32_e32 v0, 0
@@ -303,6 +302,7 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(<3 x i64> addrspace(1
; FLATSCR-NEXT: s_movk_i32 s2, 0x2000
; FLATSCR-NEXT: scratch_load_dwordx4 v[4:7], off, s2 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
; FLATSCR-NEXT: v_mov_b32_e32 v12, 0
; FLATSCR-NEXT: v_add_co_u32_e32 v2, vcc, v2, v6
; FLATSCR-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/operand-folding.ll b/llvm/test/CodeGen/AMDGPU/operand-folding.ll
index 008856b..38a038c 100644
--- a/llvm/test/CodeGen/AMDGPU/operand-folding.ll
+++ b/llvm/test/CodeGen/AMDGPU/operand-folding.ll
@@ -126,7 +126,7 @@ define amdgpu_kernel void @no_fold_tied_subregister() #1 {
; There should be exact one folding on the same operand.
; CHECK-LABEL: {{^}}no_extra_fold_on_same_opnd
-; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
+; CHECK-NOT: %bb.1:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @no_extra_fold_on_same_opnd() #1 {
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index 37c3127..9ca9f80 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -439,106 +439,104 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v0
; GFX8-NEXT: v_mov_b32_e32 v2, 0
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 17, v0
-; GFX8-NEXT: v_and_b32_e32 v4, 0xfe000000, v0
-; GFX8-NEXT: v_lshlrev_b64 v[2:3], 3, v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v5, s35
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v4
-; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, s34, v2
-; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; GFX8-NEXT: v_lshlrev_b64 v[1:2], 3, v[1:2]
+; GFX8-NEXT: v_and_b32_e32 v0, 0xfe000000, v0
+; GFX8-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX8-NEXT: v_mov_b32_e32 v3, s35
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, s34, v1
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v2, v3, vcc
; GFX8-NEXT: s_movk_i32 s0, 0x5000
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1
+; GFX8-NEXT: v_mov_b32_e32 v5, 0
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; GFX8-NEXT: v_mov_b32_e32 v6, 0
-; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GFX8-NEXT: v_mov_b32_e32 v7, 0
; GFX8-NEXT: s_movk_i32 s4, 0x7f
; GFX8-NEXT: .LBB1_1: ; %for.cond.preheader
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB1_2 Depth 2
-; GFX8-NEXT: v_mov_b32_e32 v5, v3
; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
; GFX8-NEXT: s_mov_b32 s5, 0
; GFX8-NEXT: .LBB1_2: ; %for.body
; GFX8-NEXT: ; Parent Loop BB1_1 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0xffffb000, v4
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0xffffb000, v3
; GFX8-NEXT: s_mov_b64 s[0:1], vcc
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0xffffb800, v4
+; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0xffffb800, v3
; GFX8-NEXT: s_mov_b64 s[2:3], vcc
-; GFX8-NEXT: v_addc_u32_e64 v9, vcc, -1, v5, s[0:1]
-; GFX8-NEXT: flat_load_dwordx2 v[8:9], v[8:9]
-; GFX8-NEXT: v_add_u32_e32 v12, vcc, 0xffffc000, v4
+; GFX8-NEXT: v_addc_u32_e64 v8, vcc, -1, v4, s[0:1]
+; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[7:8]
+; GFX8-NEXT: v_add_u32_e32 v11, vcc, 0xffffc000, v3
; GFX8-NEXT: s_mov_b64 s[0:1], vcc
-; GFX8-NEXT: v_addc_u32_e64 v11, vcc, -1, v5, s[2:3]
-; GFX8-NEXT: flat_load_dwordx2 v[10:11], v[10:11]
-; GFX8-NEXT: v_add_u32_e32 v14, vcc, 0xffffc800, v4
+; GFX8-NEXT: v_addc_u32_e64 v10, vcc, -1, v4, s[2:3]
+; GFX8-NEXT: flat_load_dwordx2 v[9:10], v[9:10]
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, 0xffffc800, v3
; GFX8-NEXT: s_mov_b64 s[2:3], vcc
-; GFX8-NEXT: v_addc_u32_e64 v13, vcc, -1, v5, s[0:1]
+; GFX8-NEXT: v_addc_u32_e64 v12, vcc, -1, v4, s[0:1]
; GFX8-NEXT: s_addk_i32 s5, 0x2000
; GFX8-NEXT: s_cmp_gt_u32 s5, 0x3fffff
; GFX8-NEXT: s_waitcnt vmcnt(1)
-; GFX8-NEXT: v_add_u32_e32 v16, vcc, v8, v6
-; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v9, v7, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[8:9], v[12:13]
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0xffffd000, v4
+; GFX8-NEXT: v_add_u32_e32 v15, vcc, v7, v5
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v8, v6, vcc
+; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[11:12]
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0xffffd000, v3
; GFX8-NEXT: s_mov_b64 s[0:1], vcc
-; GFX8-NEXT: v_addc_u32_e64 v15, vcc, -1, v5, s[2:3]
-; GFX8-NEXT: flat_load_dwordx2 v[12:13], v[14:15]
+; GFX8-NEXT: v_addc_u32_e64 v14, vcc, -1, v4, s[2:3]
+; GFX8-NEXT: flat_load_dwordx2 v[11:12], v[13:14]
; GFX8-NEXT: s_waitcnt vmcnt(2)
-; GFX8-NEXT: v_add_u32_e32 v16, vcc, v10, v16
-; GFX8-NEXT: v_addc_u32_e32 v11, vcc, v11, v7, vcc
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0xffffd800, v4
+; GFX8-NEXT: v_add_u32_e32 v15, vcc, v9, v15
+; GFX8-NEXT: v_addc_u32_e32 v10, vcc, v10, v6, vcc
+; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0xffffd800, v3
; GFX8-NEXT: s_mov_b64 s[2:3], vcc
-; GFX8-NEXT: v_addc_u32_e64 v7, vcc, -1, v5, s[0:1]
-; GFX8-NEXT: flat_load_dwordx2 v[6:7], v[6:7]
+; GFX8-NEXT: v_addc_u32_e64 v6, vcc, -1, v4, s[0:1]
+; GFX8-NEXT: flat_load_dwordx2 v[5:6], v[5:6]
; GFX8-NEXT: s_waitcnt vmcnt(2)
-; GFX8-NEXT: v_add_u32_e32 v14, vcc, v8, v16
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, v9, v11, vcc
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0xffffe000, v4
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, v7, v15
+; GFX8-NEXT: v_addc_u32_e32 v8, vcc, v8, v10, vcc
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0xffffe000, v3
; GFX8-NEXT: s_mov_b64 s[0:1], vcc
-; GFX8-NEXT: v_addc_u32_e64 v11, vcc, -1, v5, s[2:3]
-; GFX8-NEXT: flat_load_dwordx2 v[10:11], v[10:11]
+; GFX8-NEXT: v_addc_u32_e64 v10, vcc, -1, v4, s[2:3]
+; GFX8-NEXT: flat_load_dwordx2 v[9:10], v[9:10]
; GFX8-NEXT: s_waitcnt vmcnt(2)
-; GFX8-NEXT: v_add_u32_e32 v14, vcc, v12, v14
-; GFX8-NEXT: v_addc_u32_e32 v13, vcc, v13, v9, vcc
-; GFX8-NEXT: v_add_u32_e32 v12, vcc, 0xffffe800, v4
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, v11, v13
+; GFX8-NEXT: v_addc_u32_e32 v12, vcc, v12, v8, vcc
+; GFX8-NEXT: v_add_u32_e32 v11, vcc, 0xffffe800, v3
; GFX8-NEXT: s_mov_b64 s[2:3], vcc
-; GFX8-NEXT: v_addc_u32_e64 v9, vcc, -1, v5, s[0:1]
-; GFX8-NEXT: flat_load_dwordx2 v[8:9], v[8:9]
+; GFX8-NEXT: v_addc_u32_e64 v8, vcc, -1, v4, s[0:1]
+; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[7:8]
; GFX8-NEXT: s_waitcnt vmcnt(2)
-; GFX8-NEXT: v_add_u32_e32 v14, vcc, v6, v14
-; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v13, vcc
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0xfffff000, v4
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, v5, v13
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v6, v12, vcc
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0xfffff000, v3
; GFX8-NEXT: s_mov_b64 s[0:1], vcc
-; GFX8-NEXT: v_addc_u32_e64 v13, vcc, -1, v5, s[2:3]
-; GFX8-NEXT: flat_load_dwordx2 v[12:13], v[12:13]
+; GFX8-NEXT: v_addc_u32_e64 v12, vcc, -1, v4, s[2:3]
+; GFX8-NEXT: flat_load_dwordx2 v[11:12], v[11:12]
; GFX8-NEXT: s_waitcnt vmcnt(2)
-; GFX8-NEXT: v_add_u32_e32 v14, vcc, v10, v14
-; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v11, v7, vcc
-; GFX8-NEXT: v_addc_u32_e64 v7, s[0:1], -1, v5, s[0:1]
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0xfffff800, v4
-; GFX8-NEXT: flat_load_dwordx2 v[6:7], v[6:7]
-; GFX8-NEXT: v_addc_u32_e32 v11, vcc, -1, v5, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[10:11], v[10:11]
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, v9, v13
+; GFX8-NEXT: v_addc_u32_e32 v14, vcc, v10, v6, vcc
+; GFX8-NEXT: v_addc_u32_e64 v6, s[0:1], -1, v4, s[0:1]
+; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0xfffff800, v3
+; GFX8-NEXT: flat_load_dwordx2 v[5:6], v[5:6]
+; GFX8-NEXT: v_addc_u32_e32 v10, vcc, -1, v4, vcc
+; GFX8-NEXT: flat_load_dwordx2 v[9:10], v[9:10]
; GFX8-NEXT: s_waitcnt vmcnt(3)
-; GFX8-NEXT: v_add_u32_e32 v14, vcc, v8, v14
-; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v9, v15, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[8:9], v[4:5]
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x10000, v4
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, v7, v13
+; GFX8-NEXT: v_addc_u32_e32 v14, vcc, v8, v14, vcc
+; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[3:4]
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x10000, v3
+; GFX8-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
; GFX8-NEXT: s_waitcnt vmcnt(3)
-; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v14
-; GFX8-NEXT: v_addc_u32_e32 v13, vcc, v13, v15, vcc
+; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v13
+; GFX8-NEXT: v_addc_u32_e32 v12, vcc, v12, v14, vcc
; GFX8-NEXT: s_waitcnt vmcnt(2)
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v12
-; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v13, vcc
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v11
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v6, v12, vcc
; GFX8-NEXT: s_waitcnt vmcnt(1)
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v10, v6
-; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v11, v7, vcc
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, v9, v5
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v10, v6, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v8, v6
-; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v9, v7, vcc
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, v7, v5
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v8, v6, vcc
; GFX8-NEXT: s_cbranch_scc0 .LBB1_2
; GFX8-NEXT: ; %bb.3: ; %while.cond.loopexit
; GFX8-NEXT: ; in Loop: Header=BB1_1 Depth=1
@@ -549,7 +547,10 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: s_branch .LBB1_1
; GFX8-NEXT: .LBB1_5: ; %while.end
-; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v1, s35
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[5:6]
; GFX8-NEXT: s_endpgm
;
; GFX900-LABEL: clmem_read:
@@ -575,90 +576,88 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX900-NEXT: v_and_b32_e32 v1, 0xff, v0
; GFX900-NEXT: v_mov_b32_e32 v2, 0
; GFX900-NEXT: v_lshlrev_b32_e32 v0, 17, v0
-; GFX900-NEXT: v_and_b32_e32 v4, 0xfe000000, v0
-; GFX900-NEXT: v_lshlrev_b64 v[2:3], 3, v[1:2]
-; GFX900-NEXT: v_mov_b32_e32 v5, s35
-; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v4
-; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
-; GFX900-NEXT: v_or_b32_e32 v2, v4, v2
-; GFX900-NEXT: v_add_co_u32_e32 v2, vcc, s34, v2
-; GFX900-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GFX900-NEXT: v_lshlrev_b64 v[1:2], 3, v[1:2]
+; GFX900-NEXT: v_and_b32_e32 v0, 0xfe000000, v0
+; GFX900-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX900-NEXT: v_mov_b32_e32 v3, s35
+; GFX900-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1
+; GFX900-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v3, vcc
; GFX900-NEXT: s_movk_i32 s0, 0x5000
-; GFX900-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
-; GFX900-NEXT: v_mov_b32_e32 v6, 0
-; GFX900-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v1, vcc, s0, v1
+; GFX900-NEXT: v_mov_b32_e32 v5, 0
+; GFX900-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
; GFX900-NEXT: s_movk_i32 s4, 0x7f
-; GFX900-NEXT: v_mov_b32_e32 v7, 0
+; GFX900-NEXT: v_mov_b32_e32 v6, 0
; GFX900-NEXT: s_movk_i32 s2, 0xd000
; GFX900-NEXT: s_movk_i32 s3, 0xe000
; GFX900-NEXT: s_movk_i32 s5, 0xf000
; GFX900-NEXT: .LBB1_1: ; %for.cond.preheader
; GFX900-NEXT: ; =>This Loop Header: Depth=1
; GFX900-NEXT: ; Child Loop BB1_2 Depth 2
-; GFX900-NEXT: v_mov_b32_e32 v5, v3
; GFX900-NEXT: v_mov_b32_e32 v4, v2
+; GFX900-NEXT: v_mov_b32_e32 v3, v1
; GFX900-NEXT: s_mov_b32 s6, 0
; GFX900-NEXT: .LBB1_2: ; %for.body
; GFX900-NEXT: ; Parent Loop BB1_1 Depth=1
; GFX900-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, 0xffffb000, v4
+; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, 0xffffb000, v3
; GFX900-NEXT: s_mov_b64 s[0:1], vcc
-; GFX900-NEXT: v_addc_co_u32_e64 v9, s[0:1], -1, v5, s[0:1]
-; GFX900-NEXT: global_load_dwordx2 v[10:11], v[4:5], off offset:-4096
-; GFX900-NEXT: global_load_dwordx2 v[12:13], v[4:5], off offset:-2048
-; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v4
-; GFX900-NEXT: global_load_dwordx2 v[8:9], v[8:9], off
-; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v5, vcc
+; GFX900-NEXT: v_addc_co_u32_e64 v8, s[0:1], -1, v4, s[0:1]
+; GFX900-NEXT: global_load_dwordx2 v[9:10], v[3:4], off offset:-4096
+; GFX900-NEXT: global_load_dwordx2 v[11:12], v[3:4], off offset:-2048
+; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, 0xffffc000, v3
+; GFX900-NEXT: global_load_dwordx2 v[7:8], v[7:8], off
+; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, -1, v4, vcc
; GFX900-NEXT: s_addk_i32 s6, 0x2000
; GFX900-NEXT: s_cmp_gt_u32 s6, 0x3fffff
; GFX900-NEXT: s_waitcnt vmcnt(0)
-; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v8, v6
-; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v7, vcc
-; GFX900-NEXT: global_load_dwordx2 v[6:7], v[14:15], off offset:-2048
+; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, v7, v5
+; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v6, vcc
+; GFX900-NEXT: global_load_dwordx2 v[5:6], v[13:14], off offset:-2048
; GFX900-NEXT: s_waitcnt vmcnt(0)
-; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, v6, v8
-; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v7, v9, vcc
-; GFX900-NEXT: global_load_dwordx2 v[7:8], v[14:15], off
-; GFX900-NEXT: v_add_co_u32_e32 v6, vcc, s2, v4
+; GFX900-NEXT: v_add_co_u32_e32 v15, vcc, v5, v7
+; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v6, v8, vcc
+; GFX900-NEXT: global_load_dwordx2 v[6:7], v[13:14], off
+; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, s2, v3
; GFX900-NEXT: s_mov_b64 s[0:1], vcc
; GFX900-NEXT: s_waitcnt vmcnt(0)
-; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, v7, v16
-; GFX900-NEXT: v_addc_co_u32_e64 v7, s[0:1], -1, v5, s[0:1]
-; GFX900-NEXT: global_load_dwordx2 v[6:7], v[6:7], off offset:-2048
-; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, v8, v9, vcc
-; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, s3, v4
-; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, -1, v5, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v6, v15
+; GFX900-NEXT: v_addc_co_u32_e64 v6, s[0:1], -1, v4, s[0:1]
+; GFX900-NEXT: global_load_dwordx2 v[5:6], v[5:6], off offset:-2048
+; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v7, v8, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, s3, v3
+; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, -1, v4, vcc
; GFX900-NEXT: s_waitcnt vmcnt(0)
-; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, v6, v14
-; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, v7, v15, vcc
-; GFX900-NEXT: global_load_dwordx2 v[6:7], v[8:9], off offset:-4096
+; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v5, v13
+; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v6, v14, vcc
+; GFX900-NEXT: global_load_dwordx2 v[5:6], v[7:8], off offset:-4096
; GFX900-NEXT: s_waitcnt vmcnt(0)
-; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, v6, v14
-; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, v7, v15, vcc
-; GFX900-NEXT: global_load_dwordx2 v[6:7], v[8:9], off offset:-2048
+; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v5, v13
+; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v6, v14, vcc
+; GFX900-NEXT: global_load_dwordx2 v[5:6], v[7:8], off offset:-2048
; GFX900-NEXT: s_waitcnt vmcnt(0)
-; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, v6, v14
-; GFX900-NEXT: global_load_dwordx2 v[8:9], v[8:9], off
-; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, v7, v15, vcc
-; GFX900-NEXT: v_add_co_u32_e32 v6, vcc, s5, v4
-; GFX900-NEXT: v_addc_co_u32_e32 v7, vcc, -1, v5, vcc
-; GFX900-NEXT: global_load_dwordx2 v[6:7], v[6:7], off offset:-2048
+; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v5, v13
+; GFX900-NEXT: global_load_dwordx2 v[7:8], v[7:8], off
+; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v6, v14, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, s5, v3
+; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, -1, v4, vcc
+; GFX900-NEXT: global_load_dwordx2 v[5:6], v[5:6], off offset:-2048
; GFX900-NEXT: s_waitcnt vmcnt(1)
-; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, v8, v14
-; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, v9, v15, vcc
-; GFX900-NEXT: global_load_dwordx2 v[8:9], v[4:5], off
-; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, 0x10000, v4
-; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, v7, v13
+; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, v8, v14, vcc
+; GFX900-NEXT: global_load_dwordx2 v[7:8], v[3:4], off
+; GFX900-NEXT: v_add_co_u32_e32 v3, vcc, 0x10000, v3
+; GFX900-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v4, vcc
; GFX900-NEXT: s_waitcnt vmcnt(1)
-; GFX900-NEXT: v_add_co_u32_e32 v6, vcc, v6, v14
-; GFX900-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v15, vcc
-; GFX900-NEXT: v_add_co_u32_e32 v6, vcc, v10, v6
-; GFX900-NEXT: v_addc_co_u32_e32 v7, vcc, v11, v7, vcc
-; GFX900-NEXT: v_add_co_u32_e32 v6, vcc, v12, v6
-; GFX900-NEXT: v_addc_co_u32_e32 v7, vcc, v13, v7, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, v5, v13
+; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, v6, v14, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, v9, v5
+; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, v10, v6, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, v11, v5
+; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, v12, v6, vcc
; GFX900-NEXT: s_waitcnt vmcnt(0)
-; GFX900-NEXT: v_add_co_u32_e32 v6, vcc, v8, v6
-; GFX900-NEXT: v_addc_co_u32_e32 v7, vcc, v9, v7, vcc
+; GFX900-NEXT: v_add_co_u32_e32 v5, vcc, v7, v5
+; GFX900-NEXT: v_addc_co_u32_e32 v6, vcc, v8, v6, vcc
; GFX900-NEXT: s_cbranch_scc0 .LBB1_2
; GFX900-NEXT: ; %bb.3: ; %while.cond.loopexit
; GFX900-NEXT: ; in Loop: Header=BB1_1 Depth=1
@@ -669,7 +668,10 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX900-NEXT: s_mov_b32 s4, s0
; GFX900-NEXT: s_branch .LBB1_1
; GFX900-NEXT: .LBB1_5: ; %while.end
-; GFX900-NEXT: global_store_dwordx2 v[0:1], v[6:7], off
+; GFX900-NEXT: v_mov_b32_e32 v1, s35
+; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0
+; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX900-NEXT: global_store_dwordx2 v[0:1], v[5:6], off
; GFX900-NEXT: s_endpgm
;
; GFX10-LABEL: clmem_read:
@@ -694,85 +696,83 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX10-NEXT: s_swappc_b64 s[30:31], s[4:5]
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v0
-; GFX10-NEXT: v_lshlrev_b32_e32 v3, 17, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 17, v0
+; GFX10-NEXT: v_mov_b32_e32 v3, 0
; GFX10-NEXT: v_mov_b32_e32 v4, 0
-; GFX10-NEXT: v_mov_b32_e32 v5, 0
; GFX10-NEXT: s_movk_i32 s1, 0x7f
-; GFX10-NEXT: v_lshlrev_b64 v[0:1], 3, v[1:2]
-; GFX10-NEXT: v_and_b32_e32 v2, 0xfe000000, v3
-; GFX10-NEXT: v_or_b32_e32 v0, v2, v0
-; GFX10-NEXT: v_add_co_u32 v3, vcc_lo, v0, s34
-; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, s35, v1, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v0, s0, s34, v2
-; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, 0x5000, v3
-; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s35, 0, s0
-; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b64 v[1:2], 3, v[1:2]
+; GFX10-NEXT: v_and_b32_e32 v0, 0xfe000000, v0
+; GFX10-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX10-NEXT: v_add_co_u32 v1, vcc_lo, v1, s34
+; GFX10-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, s35, v2, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v1, vcc_lo, 0x5000, v1
+; GFX10-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
; GFX10-NEXT: .LBB1_1: ; %for.cond.preheader
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB1_2 Depth 2
-; GFX10-NEXT: v_mov_b32_e32 v7, v3
; GFX10-NEXT: v_mov_b32_e32 v6, v2
+; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: .LBB1_2: ; %for.body
; GFX10-NEXT: ; Parent Loop BB1_1 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
-; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v6, 0xffffb800
-; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, -1, v7, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v6, 0xffffc800
-; GFX10-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, -1, v7, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v14, vcc_lo, v6, 0xffffd800
-; GFX10-NEXT: v_add_co_ci_u32_e32 v15, vcc_lo, -1, v7, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v18, vcc_lo, v6, 0xffffe800
+; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v5, 0xffffb800
+; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, -1, v6, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v5, 0xffffc800
+; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, -1, v6, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v13, vcc_lo, v5, 0xffffd800
+; GFX10-NEXT: v_add_co_ci_u32_e32 v14, vcc_lo, -1, v6, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v17, vcc_lo, v5, 0xffffe800
; GFX10-NEXT: s_clause 0x2
-; GFX10-NEXT: global_load_dwordx2 v[12:13], v[8:9], off offset:-2048
-; GFX10-NEXT: global_load_dwordx2 v[16:17], v[10:11], off offset:-2048
-; GFX10-NEXT: global_load_dwordx2 v[20:21], v[14:15], off offset:-2048
-; GFX10-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, -1, v7, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v22, vcc_lo, 0xfffff000, v6
-; GFX10-NEXT: v_add_co_ci_u32_e32 v23, vcc_lo, -1, v7, vcc_lo
+; GFX10-NEXT: global_load_dwordx2 v[11:12], v[7:8], off offset:-2048
+; GFX10-NEXT: global_load_dwordx2 v[15:16], v[9:10], off offset:-2048
+; GFX10-NEXT: global_load_dwordx2 v[19:20], v[13:14], off offset:-2048
+; GFX10-NEXT: v_add_co_ci_u32_e32 v18, vcc_lo, -1, v6, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v5
+; GFX10-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, -1, v6, vcc_lo
; GFX10-NEXT: s_clause 0x7
-; GFX10-NEXT: global_load_dwordx2 v[24:25], v[18:19], off offset:-2048
-; GFX10-NEXT: global_load_dwordx2 v[8:9], v[8:9], off
-; GFX10-NEXT: global_load_dwordx2 v[10:11], v[10:11], off
-; GFX10-NEXT: global_load_dwordx2 v[14:15], v[14:15], off
-; GFX10-NEXT: global_load_dwordx2 v[26:27], v[18:19], off
-; GFX10-NEXT: global_load_dwordx2 v[28:29], v[22:23], off
-; GFX10-NEXT: global_load_dwordx2 v[30:31], v[6:7], off offset:-2048
-; GFX10-NEXT: global_load_dwordx2 v[32:33], v[6:7], off
-; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, 0x10000, v6
-; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
+; GFX10-NEXT: global_load_dwordx2 v[23:24], v[17:18], off offset:-2048
+; GFX10-NEXT: global_load_dwordx2 v[7:8], v[7:8], off
+; GFX10-NEXT: global_load_dwordx2 v[9:10], v[9:10], off
+; GFX10-NEXT: global_load_dwordx2 v[13:14], v[13:14], off
+; GFX10-NEXT: global_load_dwordx2 v[25:26], v[17:18], off
+; GFX10-NEXT: global_load_dwordx2 v[27:28], v[21:22], off
+; GFX10-NEXT: global_load_dwordx2 v[29:30], v[5:6], off offset:-2048
+; GFX10-NEXT: global_load_dwordx2 v[31:32], v[5:6], off
+; GFX10-NEXT: v_add_co_u32 v5, vcc_lo, 0x10000, v5
+; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
; GFX10-NEXT: s_addk_i32 s2, 0x2000
; GFX10-NEXT: s_cmp_gt_u32 s2, 0x3fffff
; GFX10-NEXT: s_waitcnt vmcnt(10)
-; GFX10-NEXT: v_add_co_u32 v4, s0, v12, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v13, v5, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v11, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v12, v4, s0
; GFX10-NEXT: s_waitcnt vmcnt(6)
-; GFX10-NEXT: v_add_co_u32 v4, s0, v8, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v9, v5, s0
-; GFX10-NEXT: v_add_co_u32 v4, s0, v16, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v17, v5, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v7, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v8, v4, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v15, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v16, v4, s0
; GFX10-NEXT: s_waitcnt vmcnt(5)
-; GFX10-NEXT: v_add_co_u32 v4, s0, v10, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v11, v5, s0
-; GFX10-NEXT: v_add_co_u32 v4, s0, v20, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v21, v5, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v9, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v10, v4, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v19, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v20, v4, s0
; GFX10-NEXT: s_waitcnt vmcnt(4)
-; GFX10-NEXT: v_add_co_u32 v4, s0, v14, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v15, v5, s0
-; GFX10-NEXT: v_add_co_u32 v4, s0, v24, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v25, v5, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v13, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v14, v4, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v23, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v24, v4, s0
; GFX10-NEXT: s_waitcnt vmcnt(3)
-; GFX10-NEXT: v_add_co_u32 v4, s0, v26, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v27, v5, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v25, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v26, v4, s0
; GFX10-NEXT: s_waitcnt vmcnt(2)
-; GFX10-NEXT: v_add_co_u32 v4, s0, v28, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v29, v5, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v27, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v28, v4, s0
; GFX10-NEXT: s_waitcnt vmcnt(1)
-; GFX10-NEXT: v_add_co_u32 v4, s0, v30, v4
-; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s0, v31, v5, s0
+; GFX10-NEXT: v_add_co_u32 v3, s0, v29, v3
+; GFX10-NEXT: v_add_co_ci_u32_e64 v4, s0, v30, v4, s0
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v32, v4
-; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v33, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v3, vcc_lo, v31, v3
+; GFX10-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, v32, v4, vcc_lo
; GFX10-NEXT: s_cbranch_scc0 .LBB1_2
; GFX10-NEXT: ; %bb.3: ; %while.cond.loopexit
; GFX10-NEXT: ; in Loop: Header=BB1_1 Depth=1
@@ -783,7 +783,9 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX10-NEXT: s_mov_b32 s1, s0
; GFX10-NEXT: s_branch .LBB1_1
; GFX10-NEXT: .LBB1_5: ; %while.end
-; GFX10-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX10-NEXT: v_add_co_u32 v0, s0, s34, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s35, 0, s0
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[3:4], off
; GFX10-NEXT: s_endpgm
;
; GFX90A-LABEL: clmem_read:
@@ -807,18 +809,16 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_swappc_b64 s[30:31], s[4:5]
; GFX90A-NEXT: v_and_b32_e32 v2, 0xff, v0
-; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 17, v0
; GFX90A-NEXT: v_mov_b32_e32 v3, 0
-; GFX90A-NEXT: v_and_b32_e32 v4, 0xfe000000, v0
-; GFX90A-NEXT: v_mov_b32_e32 v5, s35
-; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s34, v4
+; GFX90A-NEXT: v_lshlrev_b32_e32 v0, 17, v0
+; GFX90A-NEXT: v_and_b32_e32 v0, 0xfe000000, v0
; GFX90A-NEXT: v_lshlrev_b64 v[2:3], 3, v[2:3]
-; GFX90A-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
-; GFX90A-NEXT: v_or_b32_e32 v2, v4, v2
-; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s34, v2
-; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GFX90A-NEXT: v_or_b32_e32 v1, v0, v2
+; GFX90A-NEXT: v_mov_b32_e32 v2, s35
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v2, vcc
; GFX90A-NEXT: s_movk_i32 s0, 0x5000
-; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
+; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s0, v1
; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX90A-NEXT: s_movk_i32 s2, 0x7f
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], 0, 0
@@ -859,34 +859,34 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX90A-NEXT: s_addk_i32 s4, 0x2000
; GFX90A-NEXT: s_cmp_gt_u32 s4, 0x3fffff
; GFX90A-NEXT: s_waitcnt vmcnt(8)
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v8, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v8, v4
+; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v9, v5, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(7)
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v18, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v19, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v18, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v19, v4, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(5)
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v16, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v17, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v14, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v15, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v16, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v17, v4, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(4)
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v24, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v25, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v24, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v25, v4, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(3)
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v26, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v27, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v26, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v27, v4, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(2)
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v28, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v29, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v28, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v29, v4, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(1)
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v20, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v21, v5, vcc
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v5, vcc
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4
-; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v5, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v20, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v21, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v10, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v11, v4, vcc
+; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v12, v1
+; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v4, vcc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v4
+; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v1
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v31, v5, vcc
; GFX90A-NEXT: s_cbranch_scc0 .LBB1_2
; GFX90A-NEXT: ; %bb.3: ; %while.cond.loopexit
@@ -898,6 +898,9 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX90A-NEXT: s_mov_b32 s2, s4
; GFX90A-NEXT: s_branch .LBB1_1
; GFX90A-NEXT: .LBB1_5: ; %while.end
+; GFX90A-NEXT: v_mov_b32_e32 v1, s35
+; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0
+; GFX90A-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX90A-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
; GFX90A-NEXT: s_endpgm
;
@@ -913,104 +916,102 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_swappc_b64 s[30:31], s[2:3]
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_and_b32 v1, 0xff, v0
-; GFX11-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_lshlrev_b32 v3, 17, v0
+; GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_lshlrev_b32 v0, 17, v0
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
; GFX11-NEXT: s_movk_i32 s1, 0x7f
-; GFX11-NEXT: v_mov_b32_e32 v5, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_lshlrev_b64 v[0:1], 3, v[1:2]
-; GFX11-NEXT: v_and_b32_e32 v2, 0xfe000000, v3
+; GFX11-NEXT: v_lshlrev_b64 v[1:2], 3, v[1:2]
+; GFX11-NEXT: v_and_b32_e32 v0, 0xfe000000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_or_b32_e32 v0, v2, v0
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, v0, s34
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, s35, v1, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v2
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, 0x5000, v3
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s35, 0, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v6, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v1, v0, v1
+; GFX11-NEXT: v_add_co_u32 v1, vcc_lo, v1, s34
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, s35, v2, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v1, vcc_lo, 0x5000, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
; GFX11-NEXT: .LBB1_1: ; %for.cond.preheader
; GFX11-NEXT: ; =>This Loop Header: Depth=1
; GFX11-NEXT: ; Child Loop BB1_2 Depth 2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v7, v3 :: v_dual_mov_b32 v6, v2
+; GFX11-NEXT: v_dual_mov_b32 v6, v2 :: v_dual_mov_b32 v5, v1
; GFX11-NEXT: s_mov_b32 s2, 0
; GFX11-NEXT: .LBB1_2: ; %for.body
; GFX11-NEXT: ; Parent Loop BB1_1 Depth=1
; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v6, 0xffffc000
-; GFX11-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, -1, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, 0xffffc000, v6
-; GFX11-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, -1, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, 0xffffd000, v6
+; GFX11-NEXT: v_add_co_u32 v7, vcc_lo, v5, 0xffffc000
+; GFX11-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v9, vcc_lo, 0xffffc000, v5
+; GFX11-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v11, vcc_lo, 0xffffd000, v5
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_load_b64 v[14:15], v[8:9], off offset:-4096
-; GFX11-NEXT: global_load_b64 v[10:11], v[10:11], off offset:-2048
-; GFX11-NEXT: v_add_co_ci_u32_e32 v13, vcc_lo, -1, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v6, 0xffffe000
-; GFX11-NEXT: v_add_co_ci_u32_e32 v17, vcc_lo, -1, v7, vcc_lo
-; GFX11-NEXT: global_load_b64 v[12:13], v[12:13], off offset:-2048
-; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, 0xffffe000, v6
+; GFX11-NEXT: global_load_b64 v[13:14], v[7:8], off offset:-4096
+; GFX11-NEXT: global_load_b64 v[9:10], v[9:10], off offset:-2048
+; GFX11-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v15, vcc_lo, v5, 0xffffe000
+; GFX11-NEXT: v_add_co_ci_u32_e32 v16, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT: global_load_b64 v[11:12], v[11:12], off offset:-2048
+; GFX11-NEXT: v_add_co_u32 v17, vcc_lo, 0xffffe000, v5
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_load_b64 v[20:21], v[16:17], off offset:-4096
-; GFX11-NEXT: global_load_b64 v[8:9], v[8:9], off
-; GFX11-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, -1, v7, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, 0xfffff000, v6
-; GFX11-NEXT: v_add_co_ci_u32_e32 v23, vcc_lo, -1, v7, vcc_lo
+; GFX11-NEXT: global_load_b64 v[19:20], v[15:16], off offset:-4096
+; GFX11-NEXT: global_load_b64 v[7:8], v[7:8], off
+; GFX11-NEXT: v_add_co_ci_u32_e32 v18, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v5
+; GFX11-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, -1, v6, vcc_lo
; GFX11-NEXT: s_clause 0x5
-; GFX11-NEXT: global_load_b64 v[18:19], v[18:19], off offset:-2048
-; GFX11-NEXT: global_load_b64 v[16:17], v[16:17], off
-; GFX11-NEXT: global_load_b64 v[22:23], v[22:23], off offset:-2048
-; GFX11-NEXT: global_load_b64 v[24:25], v[6:7], off offset:-4096
-; GFX11-NEXT: global_load_b64 v[26:27], v[6:7], off offset:-2048
-; GFX11-NEXT: global_load_b64 v[28:29], v[6:7], off
-; GFX11-NEXT: v_add_co_u32 v6, vcc_lo, 0x10000, v6
-; GFX11-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
+; GFX11-NEXT: global_load_b64 v[17:18], v[17:18], off offset:-2048
+; GFX11-NEXT: global_load_b64 v[15:16], v[15:16], off
+; GFX11-NEXT: global_load_b64 v[21:22], v[21:22], off offset:-2048
+; GFX11-NEXT: global_load_b64 v[23:24], v[5:6], off offset:-4096
+; GFX11-NEXT: global_load_b64 v[25:26], v[5:6], off offset:-2048
+; GFX11-NEXT: global_load_b64 v[27:28], v[5:6], off
+; GFX11-NEXT: v_add_co_u32 v5, vcc_lo, 0x10000, v5
+; GFX11-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
; GFX11-NEXT: s_addk_i32 s2, 0x2000
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_cmp_gt_u32 s2, 0x3fffff
; GFX11-NEXT: s_waitcnt vmcnt(10)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v14, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v15, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v13, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v14, v4, s0
; GFX11-NEXT: s_waitcnt vmcnt(9)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v10, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v11, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v9, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v10, v4, s0
; GFX11-NEXT: s_waitcnt vmcnt(6)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v8, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v9, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v7, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v8, v4, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v12, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v13, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v11, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v12, v4, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v20, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v21, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v19, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v20, v4, s0
; GFX11-NEXT: s_waitcnt vmcnt(5)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v18, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v19, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v17, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v18, v4, s0
; GFX11-NEXT: s_waitcnt vmcnt(4)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v16, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v17, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v15, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v16, v4, s0
; GFX11-NEXT: s_waitcnt vmcnt(3)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v22, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v23, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v21, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v22, v4, s0
; GFX11-NEXT: s_waitcnt vmcnt(2)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v24, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v25, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v23, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v24, v4, s0
; GFX11-NEXT: s_waitcnt vmcnt(1)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v4, s0, v26, v4
-; GFX11-NEXT: v_add_co_ci_u32_e64 v5, s0, v27, v5, s0
+; GFX11-NEXT: v_add_co_u32 v3, s0, v25, v3
+; GFX11-NEXT: v_add_co_ci_u32_e64 v4, s0, v26, v4, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v28, v4
-; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v29, v5, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, v27, v3
+; GFX11-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, v28, v4, vcc_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
; GFX11-NEXT: ; %bb.3: ; %while.cond.loopexit
; GFX11-NEXT: ; in Loop: Header=BB1_1 Depth=1
@@ -1021,7 +1022,10 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX11-NEXT: s_mov_b32 s1, s0
; GFX11-NEXT: s_branch .LBB1_1
; GFX11-NEXT: .LBB1_5: ; %while.end
-; GFX11-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s35, 0, s0
+; GFX11-NEXT: global_store_b64 v[0:1], v[3:4], off
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll b/llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll
index b2d13e5..dc53635 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll
@@ -487,10 +487,9 @@ entry:
; GCN-LABEL: {{^}}add_bb_v2i16:
; NOSDWA-NOT: v_add_{{(_co)?}}_u32_sdwa
-; VI: v_readfirstlane_b32 [[LO:s[0-9]+]]
-; VI: v_readfirstlane_b32 [[HI:s[0-9]+]]
-; VI: s_lshr_b32 [[LOSH:s[0-9]+]], [[LO]], 16
-; VI: s_lshr_b32 [[HISH:s[0-9]+]], [[HI]], 16
+; VI: v_add_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-NEXT: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9_10: v_pk_add_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
index 695577a..50a8d78 100644
--- a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
@@ -6,7 +6,6 @@ define amdgpu_kernel void @break_inserted_outside_of_loop(i32 addrspace(1)* %out
; SI-LABEL: break_inserted_outside_of_loop:
; SI: ; %bb.0: ; %main_body
; SI-NEXT: s_load_dword s2, s[0:1], 0xb
-; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: v_mbcnt_lo_u32_b32_e64 v0, -1, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s2, v0
@@ -21,16 +20,17 @@ define amdgpu_kernel void @break_inserted_outside_of_loop(i32 addrspace(1)* %out
; SI-NEXT: s_cbranch_execnz .LBB0_1
; SI-NEXT: ; %bb.2: ; %ENDLOOP
; SI-NEXT: s_or_b64 exec, exec, s[2:3]
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; FLAT-LABEL: break_inserted_outside_of_loop:
; FLAT: ; %bb.0: ; %main_body
; FLAT-NEXT: s_load_dword s2, s[0:1], 0x2c
-; FLAT-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; FLAT-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
; FLAT-NEXT: s_waitcnt lgkmcnt(0)
; FLAT-NEXT: v_and_b32_e32 v0, s2, v0
@@ -45,9 +45,11 @@ define amdgpu_kernel void @break_inserted_outside_of_loop(i32 addrspace(1)* %out
; FLAT-NEXT: s_cbranch_execnz .LBB0_1
; FLAT-NEXT: ; %bb.2: ; %ENDLOOP
; FLAT-NEXT: s_or_b64 exec, exec, s[2:3]
+; FLAT-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; FLAT-NEXT: s_mov_b32 s3, 0xf000
; FLAT-NEXT: s_mov_b32 s2, -1
; FLAT-NEXT: v_mov_b32_e32 v0, 0
+; FLAT-NEXT: s_waitcnt lgkmcnt(0)
; FLAT-NEXT: buffer_store_dword v0, off, s[0:3], 0
; FLAT-NEXT: s_endpgm
main_body:
@@ -165,68 +167,69 @@ define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32
; SI-LABEL: loop_land_info_assert:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
-; SI-NEXT: s_load_dword s6, s[0:1], 0x0
-; SI-NEXT: s_load_dword s14, s[0:1], 0xc
+; SI-NEXT: s_load_dword s8, s[0:1], 0x0
; SI-NEXT: v_bfrev_b32_e32 v0, 44
+; SI-NEXT: s_mov_b32 s11, 0xf000
+; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lt_i32 s2, 1
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: s_cmp_lt_i32 s3, 4
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_cmp_lt_i32 s3, 4
+; SI-NEXT: s_cselect_b64 s[6:7], -1, 0
; SI-NEXT: s_cmp_gt_i32 s3, 3
; SI-NEXT: s_cselect_b64 s[2:3], -1, 0
-; SI-NEXT: s_and_b64 s[2:3], s[0:1], s[2:3]
-; SI-NEXT: v_cmp_lt_f32_e64 s[6:7], |s6|, v0
-; SI-NEXT: s_and_b64 s[0:1], exec, s[4:5]
-; SI-NEXT: s_and_b64 s[2:3], exec, s[2:3]
-; SI-NEXT: s_and_b64 s[4:5], exec, s[6:7]
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_and_b64 s[4:5], s[4:5], s[2:3]
+; SI-NEXT: v_cmp_lt_f32_e64 s[8:9], |s8|, v0
+; SI-NEXT: s_and_b64 s[2:3], exec, s[6:7]
+; SI-NEXT: s_and_b64 s[4:5], exec, s[4:5]
+; SI-NEXT: s_and_b64 s[6:7], exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, 3
; SI-NEXT: s_branch .LBB3_3
; SI-NEXT: .LBB3_1: ; in Loop: Header=BB3_3 Depth=1
; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: s_mov_b64 s[10:11], -1
; SI-NEXT: s_mov_b64 s[12:13], -1
+; SI-NEXT: s_mov_b64 s[14:15], -1
; SI-NEXT: .LBB3_2: ; %Flow
; SI-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; SI-NEXT: s_and_b64 vcc, exec, s[12:13]
+; SI-NEXT: s_and_b64 vcc, exec, s[14:15]
; SI-NEXT: s_cbranch_vccnz .LBB3_8
; SI-NEXT: .LBB3_3: ; %while.cond
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_mov_b64 s[10:11], -1
-; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_mov_b64 s[12:13], -1
-; SI-NEXT: s_mov_b64 vcc, s[0:1]
+; SI-NEXT: s_mov_b64 s[8:9], -1
+; SI-NEXT: s_mov_b64 s[14:15], -1
+; SI-NEXT: s_mov_b64 vcc, s[2:3]
; SI-NEXT: s_cbranch_vccz .LBB3_2
; SI-NEXT: ; %bb.4: ; %convex.exit
; SI-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; SI-NEXT: s_mov_b64 vcc, s[2:3]
+; SI-NEXT: s_mov_b64 vcc, s[4:5]
; SI-NEXT: s_cbranch_vccz .LBB3_1
; SI-NEXT: ; %bb.5: ; %if.end
; SI-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; SI-NEXT: s_mov_b64 s[12:13], -1
-; SI-NEXT: s_mov_b64 vcc, s[4:5]
+; SI-NEXT: s_mov_b64 s[14:15], -1
+; SI-NEXT: s_mov_b64 vcc, s[6:7]
; SI-NEXT: s_cbranch_vccz .LBB3_7
; SI-NEXT: ; %bb.6: ; %if.else
; SI-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: s_mov_b64 s[12:13], 0
+; SI-NEXT: s_mov_b64 s[14:15], 0
; SI-NEXT: .LBB3_7: ; %Flow6
; SI-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; SI-NEXT: s_mov_b64 s[10:11], 0
+; SI-NEXT: s_mov_b64 s[12:13], 0
; SI-NEXT: ; implicit-def: $sgpr8_sgpr9
; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_8: ; %loop.exit.guard4
; SI-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; SI-NEXT: s_and_b64 vcc, exec, s[10:11]
+; SI-NEXT: s_and_b64 vcc, exec, s[12:13]
; SI-NEXT: s_cbranch_vccz .LBB3_3
; SI-NEXT: ; %bb.9: ; %loop.exit.guard
; SI-NEXT: s_and_b64 vcc, exec, s[8:9]
; SI-NEXT: s_cbranch_vccz .LBB3_13
; SI-NEXT: ; %bb.10: ; %for.cond.preheader
-; SI-NEXT: s_cmpk_lt_i32 s14, 0x3e8
+; SI-NEXT: s_load_dword s0, s[0:1], 0xc
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_cmpk_lt_i32 s0, 0x3e8
; SI-NEXT: s_cbranch_scc0 .LBB3_13
; SI-NEXT: ; %bb.11: ; %for.body
; SI-NEXT: s_and_b64 vcc, exec, 0
@@ -240,68 +243,69 @@ define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32
; FLAT-LABEL: loop_land_info_assert:
; FLAT: ; %bb.0: ; %entry
; FLAT-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
-; FLAT-NEXT: s_load_dword s6, s[0:1], 0x0
-; FLAT-NEXT: s_load_dword s14, s[0:1], 0x30
+; FLAT-NEXT: s_load_dword s8, s[0:1], 0x0
; FLAT-NEXT: v_bfrev_b32_e32 v0, 44
+; FLAT-NEXT: s_mov_b32 s11, 0xf000
+; FLAT-NEXT: s_mov_b32 s10, -1
; FLAT-NEXT: s_waitcnt lgkmcnt(0)
; FLAT-NEXT: s_cmp_lt_i32 s2, 1
-; FLAT-NEXT: s_cselect_b64 s[0:1], -1, 0
-; FLAT-NEXT: s_cmp_lt_i32 s3, 4
; FLAT-NEXT: s_cselect_b64 s[4:5], -1, 0
+; FLAT-NEXT: s_cmp_lt_i32 s3, 4
+; FLAT-NEXT: s_cselect_b64 s[6:7], -1, 0
; FLAT-NEXT: s_cmp_gt_i32 s3, 3
; FLAT-NEXT: s_cselect_b64 s[2:3], -1, 0
-; FLAT-NEXT: s_and_b64 s[2:3], s[0:1], s[2:3]
-; FLAT-NEXT: v_cmp_lt_f32_e64 s[6:7], |s6|, v0
-; FLAT-NEXT: s_and_b64 s[0:1], exec, s[4:5]
-; FLAT-NEXT: s_and_b64 s[2:3], exec, s[2:3]
-; FLAT-NEXT: s_and_b64 s[4:5], exec, s[6:7]
-; FLAT-NEXT: s_mov_b32 s7, 0xf000
-; FLAT-NEXT: s_mov_b32 s6, -1
+; FLAT-NEXT: s_and_b64 s[4:5], s[4:5], s[2:3]
+; FLAT-NEXT: v_cmp_lt_f32_e64 s[8:9], |s8|, v0
+; FLAT-NEXT: s_and_b64 s[2:3], exec, s[6:7]
+; FLAT-NEXT: s_and_b64 s[4:5], exec, s[4:5]
+; FLAT-NEXT: s_and_b64 s[6:7], exec, s[8:9]
; FLAT-NEXT: v_mov_b32_e32 v0, 3
; FLAT-NEXT: s_branch .LBB3_3
; FLAT-NEXT: .LBB3_1: ; in Loop: Header=BB3_3 Depth=1
; FLAT-NEXT: s_mov_b64 s[8:9], 0
-; FLAT-NEXT: s_mov_b64 s[10:11], -1
; FLAT-NEXT: s_mov_b64 s[12:13], -1
+; FLAT-NEXT: s_mov_b64 s[14:15], -1
; FLAT-NEXT: .LBB3_2: ; %Flow
; FLAT-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; FLAT-NEXT: s_and_b64 vcc, exec, s[12:13]
+; FLAT-NEXT: s_and_b64 vcc, exec, s[14:15]
; FLAT-NEXT: s_cbranch_vccnz .LBB3_8
; FLAT-NEXT: .LBB3_3: ; %while.cond
; FLAT-NEXT: ; =>This Inner Loop Header: Depth=1
-; FLAT-NEXT: s_mov_b64 s[10:11], -1
-; FLAT-NEXT: s_mov_b64 s[8:9], -1
; FLAT-NEXT: s_mov_b64 s[12:13], -1
-; FLAT-NEXT: s_mov_b64 vcc, s[0:1]
+; FLAT-NEXT: s_mov_b64 s[8:9], -1
+; FLAT-NEXT: s_mov_b64 s[14:15], -1
+; FLAT-NEXT: s_mov_b64 vcc, s[2:3]
; FLAT-NEXT: s_cbranch_vccz .LBB3_2
; FLAT-NEXT: ; %bb.4: ; %convex.exit
; FLAT-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; FLAT-NEXT: s_mov_b64 vcc, s[2:3]
+; FLAT-NEXT: s_mov_b64 vcc, s[4:5]
; FLAT-NEXT: s_cbranch_vccz .LBB3_1
; FLAT-NEXT: ; %bb.5: ; %if.end
; FLAT-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; FLAT-NEXT: s_mov_b64 s[12:13], -1
-; FLAT-NEXT: s_mov_b64 vcc, s[4:5]
+; FLAT-NEXT: s_mov_b64 s[14:15], -1
+; FLAT-NEXT: s_mov_b64 vcc, s[6:7]
; FLAT-NEXT: s_cbranch_vccz .LBB3_7
; FLAT-NEXT: ; %bb.6: ; %if.else
; FLAT-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; FLAT-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; FLAT-NEXT: buffer_store_dword v0, off, s[8:11], 0
; FLAT-NEXT: s_waitcnt vmcnt(0)
-; FLAT-NEXT: s_mov_b64 s[12:13], 0
+; FLAT-NEXT: s_mov_b64 s[14:15], 0
; FLAT-NEXT: .LBB3_7: ; %Flow6
; FLAT-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; FLAT-NEXT: s_mov_b64 s[10:11], 0
+; FLAT-NEXT: s_mov_b64 s[12:13], 0
; FLAT-NEXT: ; implicit-def: $sgpr8_sgpr9
; FLAT-NEXT: s_branch .LBB3_2
; FLAT-NEXT: .LBB3_8: ; %loop.exit.guard4
; FLAT-NEXT: ; in Loop: Header=BB3_3 Depth=1
-; FLAT-NEXT: s_and_b64 vcc, exec, s[10:11]
+; FLAT-NEXT: s_and_b64 vcc, exec, s[12:13]
; FLAT-NEXT: s_cbranch_vccz .LBB3_3
; FLAT-NEXT: ; %bb.9: ; %loop.exit.guard
; FLAT-NEXT: s_and_b64 vcc, exec, s[8:9]
; FLAT-NEXT: s_cbranch_vccz .LBB3_13
; FLAT-NEXT: ; %bb.10: ; %for.cond.preheader
-; FLAT-NEXT: s_cmpk_lt_i32 s14, 0x3e8
+; FLAT-NEXT: s_load_dword s0, s[0:1], 0x30
+; FLAT-NEXT: s_waitcnt lgkmcnt(0)
+; FLAT-NEXT: s_cmpk_lt_i32 s0, 0x3e8
; FLAT-NEXT: s_cbranch_scc0 .LBB3_13
; FLAT-NEXT: ; %bb.11: ; %for.body
; FLAT-NEXT: s_and_b64 vcc, exec, 0
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
index 5cb44fa..e6b7e02 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
@@ -25,17 +25,17 @@ define amdgpu_ps float @else1(i32 %z, float %v) #0 {
; SI-NEXT: bb.2.if:
; SI-NEXT: successors: %bb.4(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: %3:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[PHI1]], 0, [[PHI1]], 0, 0, implicit $mode, implicit $exec
+ ; SI-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[PHI1]], 0, [[PHI1]], 0, 0, implicit $mode, implicit $exec
; SI-NEXT: S_BRANCH %bb.4
; SI-NEXT: {{ $}}
; SI-NEXT: bb.3.else:
; SI-NEXT: successors: %bb.1(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: %4:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1077936128, 0, killed [[COPY]], 0, 0, implicit $mode, implicit $exec
+ ; SI-NEXT: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1077936128, 0, killed [[COPY]], 0, 0, implicit $mode, implicit $exec
; SI-NEXT: S_BRANCH %bb.1
; SI-NEXT: {{ $}}
; SI-NEXT: bb.4.end:
- ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[PHI]], %bb.1, %3, %bb.2
+ ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[PHI]], %bb.1, [[V_ADD_F32_e64_]], %bb.2
; SI-NEXT: SI_END_CF killed [[SI_ELSE]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: $vgpr0 = COPY killed [[PHI2]]
; SI-NEXT: SI_RETURN_TO_EPILOG killed $vgpr0
@@ -81,21 +81,21 @@ define amdgpu_ps float @else2(i32 %z, float %v) #0 {
; SI-NEXT: bb.2.if:
; SI-NEXT: successors: %bb.4(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: %4:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+ ; SI-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; SI-NEXT: S_BRANCH %bb.4
; SI-NEXT: {{ $}}
; SI-NEXT: bb.3.else:
; SI-NEXT: successors: %bb.1(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: %5:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1077936128, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+ ; SI-NEXT: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1077936128, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; SI-NEXT: S_BRANCH %bb.1
; SI-NEXT: {{ $}}
; SI-NEXT: bb.4.end:
- ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[PHI1]], %bb.1, %4, %bb.2
- ; SI-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[PHI]], %bb.1, %4, %bb.2
+ ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[PHI1]], %bb.1, [[V_ADD_F32_e64_]], %bb.2
+ ; SI-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[PHI]], %bb.1, [[V_ADD_F32_e64_]], %bb.2
; SI-NEXT: SI_END_CF killed [[SI_ELSE]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; SI-NEXT: %15:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[PHI2]], 0, killed [[PHI3]], 0, 0, implicit $mode, implicit $exec
- ; SI-NEXT: $vgpr0 = COPY killed %15
+ ; SI-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[PHI2]], 0, killed [[PHI3]], 0, 0, implicit $mode, implicit $exec
+ ; SI-NEXT: $vgpr0 = COPY killed [[V_ADD_F32_e64_1]]
; SI-NEXT: SI_RETURN_TO_EPILOG killed $vgpr0
main_body:
%cc = icmp sgt i32 %z, 5
@@ -150,21 +150,21 @@ define amdgpu_ps float @else3(i32 %z, float %v, i32 inreg %bound, i32 %x0) #0 {
; SI-NEXT: bb.3.if:
; SI-NEXT: successors: %bb.5(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: %7:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[PHI]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+ ; SI-NEXT: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[PHI]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
; SI-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 1, killed [[PHI4]], 0, implicit $exec
; SI-NEXT: S_BRANCH %bb.5
; SI-NEXT: {{ $}}
; SI-NEXT: bb.4.else:
; SI-NEXT: successors: %bb.2(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: %9:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY2]], 0, [[PHI1]], 0, 0, implicit $mode, implicit $exec
+ ; SI-NEXT: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY2]], 0, [[PHI1]], 0, 0, implicit $mode, implicit $exec
; SI-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 killed [[PHI1]], 3, implicit $exec
; SI-NEXT: S_BRANCH %bb.2
; SI-NEXT: {{ $}}
; SI-NEXT: bb.5.if.end:
; SI-NEXT: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI [[PHI3]], %bb.2, %7, %bb.3
+ ; SI-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI [[PHI3]], %bb.2, [[V_MUL_F32_e64_]], %bb.3
; SI-NEXT: [[PHI6:%[0-9]+]]:vgpr_32 = PHI [[PHI2]], %bb.2, [[V_ADD_U32_e64_]], %bb.3
; SI-NEXT: SI_END_CF killed [[SI_ELSE]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: [[V_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 1, [[PHI6]], 0, implicit $exec
@@ -174,8 +174,8 @@ define amdgpu_ps float @else3(i32 %z, float %v, i32 inreg %bound, i32 %x0) #0 {
; SI-NEXT: S_BRANCH %bb.6
; SI-NEXT: {{ $}}
; SI-NEXT: bb.6.for.end:
- ; SI-NEXT: %31:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[PHI6]], 0, killed [[PHI5]], 0, 0, implicit $mode, implicit $exec
- ; SI-NEXT: $vgpr0 = COPY killed %31
+ ; SI-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[PHI6]], 0, killed [[PHI5]], 0, 0, implicit $mode, implicit $exec
+ ; SI-NEXT: $vgpr0 = COPY killed [[V_ADD_F32_e64_]]
; SI-NEXT: SI_RETURN_TO_EPILOG killed $vgpr0
entry:
; %break = icmp sgt i32 %bound, 0
@@ -439,8 +439,8 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, float(
; SI-NEXT: bb.10.end:
; SI-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI [[PHI]], %bb.1, [[COPY8]], %bb.5
; SI-NEXT: SI_END_CF killed [[SI_ELSE]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; SI-NEXT: %27:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[PHI5]], 0, killed [[COPY4]], 0, 0, implicit $mode, implicit $exec
- ; SI-NEXT: $vgpr0 = COPY killed %27
+ ; SI-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[PHI5]], 0, killed [[COPY4]], 0, 0, implicit $mode, implicit $exec
+ ; SI-NEXT: $vgpr0 = COPY killed [[V_ADD_F32_e64_]]
; SI-NEXT: SI_RETURN_TO_EPILOG killed $vgpr0
main_body:
%cc = icmp sgt i32 %z, 5
@@ -568,55 +568,50 @@ define protected amdgpu_kernel void @nested_waterfalls(%tex* addrspace(1)* %tex.
; SI-NEXT: {{ $}}
; SI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY killed $sgpr0_sgpr1
; SI-NEXT: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY killed $vgpr0
- ; SI-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.tex.coerce.kernarg.offset.cast, align 4, addrspace 4)
- ; SI-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- ; SI-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY1]](s32), %subreg.sub0, killed [[V_MOV_B32_e32_]], %subreg.sub1
; SI-NEXT: {{ $}}
; SI-NEXT: bb.1.if.then:
; SI-NEXT: successors: %bb.2(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[V_LSHLREV_B64_e64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64_e64 3, killed [[REG_SEQUENCE]], implicit $exec
- ; SI-NEXT: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_ADD_CO_U32_e64 [[S_LOAD_DWORDX2_IMM]].sub0, [[V_LSHLREV_B64_e64_]].sub0, 0, implicit $exec
- ; SI-NEXT: %85:vgpr_32, dead %87:sreg_32_xm0_xexec = V_ADDC_U32_e64 killed [[S_LOAD_DWORDX2_IMM]].sub1, killed [[V_LSHLREV_B64_e64_]].sub1, killed [[V_ADD_CO_U32_e64_1]], 0, implicit $exec
- ; SI-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[V_ADD_CO_U32_e64_]], %subreg.sub0, killed %85, %subreg.sub1
- ; SI-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2 killed [[REG_SEQUENCE1]], 0, 0, implicit $exec :: (load (s64) from %ir.idx, addrspace 1)
- ; SI-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4 [[GLOBAL_LOAD_DWORDX2_]], 16, 0, implicit $exec :: (invariant load (s128) from %ir.6 + 16, addrspace 4)
+ ; SI-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.tex.coerce.kernarg.offset.cast, align 4, addrspace 4)
+ ; SI-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 3, killed [[COPY1]](s32), implicit $exec
+ ; SI-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2_SADDR killed [[S_LOAD_DWORDX2_IMM]], killed [[V_LSHLREV_B32_e64_]], 0, 0, implicit $exec :: (load (s64) from %ir.idx, addrspace 1)
+ ; SI-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4 [[GLOBAL_LOAD_DWORDX2_SADDR]], 16, 0, implicit $exec :: (invariant load (s128) from %ir.6 + 16, addrspace 4)
; SI-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub3
; SI-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub2
; SI-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub1
; SI-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub0
- ; SI-NEXT: [[GLOBAL_LOAD_DWORDX4_1:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4 [[GLOBAL_LOAD_DWORDX2_]], 0, 0, implicit $exec :: (invariant load (s128) from %ir.6, align 32, addrspace 4)
+ ; SI-NEXT: [[GLOBAL_LOAD_DWORDX4_1:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4 [[GLOBAL_LOAD_DWORDX2_SADDR]], 0, 0, implicit $exec :: (invariant load (s128) from %ir.6, align 32, addrspace 4)
; SI-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX4_1]].sub3
; SI-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX4_1]].sub2
; SI-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX4_1]].sub1
; SI-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_1]].sub0
- ; SI-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_256 = REG_SEQUENCE killed [[COPY9]], %subreg.sub0, killed [[COPY8]], %subreg.sub1, killed [[COPY7]], %subreg.sub2, killed [[COPY6]], %subreg.sub3, killed [[COPY5]], %subreg.sub4, killed [[COPY4]], %subreg.sub5, killed [[COPY3]], %subreg.sub6, killed [[COPY2]], %subreg.sub7
- ; SI-NEXT: [[GLOBAL_LOAD_DWORDX4_2:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4 killed [[GLOBAL_LOAD_DWORDX2_]], 48, 0, implicit $exec :: (invariant load (s128) from %ir.8, addrspace 4)
+ ; SI-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_256 = REG_SEQUENCE killed [[COPY9]], %subreg.sub0, killed [[COPY8]], %subreg.sub1, killed [[COPY7]], %subreg.sub2, killed [[COPY6]], %subreg.sub3, killed [[COPY5]], %subreg.sub4, killed [[COPY4]], %subreg.sub5, killed [[COPY3]], %subreg.sub6, killed [[COPY2]], %subreg.sub7
+ ; SI-NEXT: [[GLOBAL_LOAD_DWORDX4_2:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4 killed [[GLOBAL_LOAD_DWORDX2_SADDR]], 48, 0, implicit $exec :: (invariant load (s128) from %ir.8, addrspace 4)
; SI-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
; SI-NEXT: {{ $}}
; SI-NEXT: bb.2:
; SI-NEXT: successors: %bb.3(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE2]].sub0, implicit $exec
- ; SI-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE2]].sub1, implicit $exec
- ; SI-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
- ; SI-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE3]], [[REG_SEQUENCE2]].sub0_sub1, implicit $exec
- ; SI-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE2]].sub2, implicit $exec
- ; SI-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE2]].sub3, implicit $exec
- ; SI-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
- ; SI-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE4]], [[REG_SEQUENCE2]].sub2_sub3, implicit $exec
+ ; SI-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE]].sub0, implicit $exec
+ ; SI-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE]].sub1, implicit $exec
+ ; SI-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
+ ; SI-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]].sub0_sub1, implicit $exec
+ ; SI-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE]].sub2, implicit $exec
+ ; SI-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE]].sub3, implicit $exec
+ ; SI-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
+ ; SI-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]].sub2_sub3, implicit $exec
; SI-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 killed [[V_CMP_EQ_U64_e64_]], killed [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; SI-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE2]].sub4, implicit $exec
- ; SI-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE2]].sub5, implicit $exec
- ; SI-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_4]], %subreg.sub0, [[V_READFIRSTLANE_B32_5]], %subreg.sub1
- ; SI-NEXT: [[V_CMP_EQ_U64_e64_2:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE5]], [[REG_SEQUENCE2]].sub4_sub5, implicit $exec
+ ; SI-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE]].sub4, implicit $exec
+ ; SI-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE]].sub5, implicit $exec
+ ; SI-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_4]], %subreg.sub0, [[V_READFIRSTLANE_B32_5]], %subreg.sub1
+ ; SI-NEXT: [[V_CMP_EQ_U64_e64_2:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE3]], [[REG_SEQUENCE]].sub4_sub5, implicit $exec
; SI-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 killed [[S_AND_B32_]], killed [[V_CMP_EQ_U64_e64_2]], implicit-def dead $scc
- ; SI-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE2]].sub6, implicit $exec
- ; SI-NEXT: [[V_READFIRSTLANE_B32_7:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE2]].sub7, implicit $exec
- ; SI-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_6]], %subreg.sub0, [[V_READFIRSTLANE_B32_7]], %subreg.sub1
- ; SI-NEXT: [[V_CMP_EQ_U64_e64_3:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE6]], [[REG_SEQUENCE2]].sub6_sub7, implicit $exec
+ ; SI-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE]].sub6, implicit $exec
+ ; SI-NEXT: [[V_READFIRSTLANE_B32_7:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[REG_SEQUENCE]].sub7, implicit $exec
+ ; SI-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_6]], %subreg.sub0, [[V_READFIRSTLANE_B32_7]], %subreg.sub1
+ ; SI-NEXT: [[V_CMP_EQ_U64_e64_3:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE4]], [[REG_SEQUENCE]].sub6_sub7, implicit $exec
; SI-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 killed [[S_AND_B32_1]], killed [[V_CMP_EQ_U64_e64_3]], implicit-def dead $scc
- ; SI-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_256 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1, killed [[V_READFIRSTLANE_B32_2]], %subreg.sub2, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub3, killed [[V_READFIRSTLANE_B32_4]], %subreg.sub4, killed [[V_READFIRSTLANE_B32_5]], %subreg.sub5, killed [[V_READFIRSTLANE_B32_6]], %subreg.sub6, killed [[V_READFIRSTLANE_B32_7]], %subreg.sub7
+ ; SI-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_256 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1, killed [[V_READFIRSTLANE_B32_2]], %subreg.sub2, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub3, killed [[V_READFIRSTLANE_B32_4]], %subreg.sub4, killed [[V_READFIRSTLANE_B32_5]], %subreg.sub5, killed [[V_READFIRSTLANE_B32_6]], %subreg.sub6, killed [[V_READFIRSTLANE_B32_7]], %subreg.sub7
; SI-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_2]], implicit-def $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: {{ $}}
; SI-NEXT: bb.3:
@@ -629,20 +624,20 @@ define protected amdgpu_kernel void @nested_waterfalls(%tex* addrspace(1)* %tex.
; SI-NEXT: {{ $}}
; SI-NEXT: [[V_READFIRSTLANE_B32_8:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[GLOBAL_LOAD_DWORDX4_2]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_9:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[GLOBAL_LOAD_DWORDX4_2]].sub1, implicit $exec
- ; SI-NEXT: [[REG_SEQUENCE8:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_8]], %subreg.sub0, [[V_READFIRSTLANE_B32_9]], %subreg.sub1
- ; SI-NEXT: [[V_CMP_EQ_U64_e64_4:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE8]], [[GLOBAL_LOAD_DWORDX4_2]].sub0_sub1, implicit $exec
+ ; SI-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_8]], %subreg.sub0, [[V_READFIRSTLANE_B32_9]], %subreg.sub1
+ ; SI-NEXT: [[V_CMP_EQ_U64_e64_4:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE6]], [[GLOBAL_LOAD_DWORDX4_2]].sub0_sub1, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_10:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[GLOBAL_LOAD_DWORDX4_2]].sub2, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_11:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[GLOBAL_LOAD_DWORDX4_2]].sub3, implicit $exec
- ; SI-NEXT: [[REG_SEQUENCE9:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_10]], %subreg.sub0, [[V_READFIRSTLANE_B32_11]], %subreg.sub1
- ; SI-NEXT: [[V_CMP_EQ_U64_e64_5:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE9]], [[GLOBAL_LOAD_DWORDX4_2]].sub2_sub3, implicit $exec
+ ; SI-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[V_READFIRSTLANE_B32_10]], %subreg.sub0, [[V_READFIRSTLANE_B32_11]], %subreg.sub1
+ ; SI-NEXT: [[V_CMP_EQ_U64_e64_5:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 killed [[REG_SEQUENCE7]], [[GLOBAL_LOAD_DWORDX4_2]].sub2_sub3, implicit $exec
; SI-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 killed [[V_CMP_EQ_U64_e64_4]], killed [[V_CMP_EQ_U64_e64_5]], implicit-def dead $scc
- ; SI-NEXT: [[REG_SEQUENCE10:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_8]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_9]], %subreg.sub1, killed [[V_READFIRSTLANE_B32_10]], %subreg.sub2, killed [[V_READFIRSTLANE_B32_11]], %subreg.sub3
+ ; SI-NEXT: [[REG_SEQUENCE8:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_8]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_9]], %subreg.sub1, killed [[V_READFIRSTLANE_B32_10]], %subreg.sub2, killed [[V_READFIRSTLANE_B32_11]], %subreg.sub3
; SI-NEXT: [[S_AND_SAVEEXEC_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_3]], implicit-def $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: {{ $}}
; SI-NEXT: bb.5:
; SI-NEXT: successors: %bb.4(0x40000000), %bb.6(0x40000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[IMAGE_SAMPLE_V1_V2_gfx10_:%[0-9]+]]:vgpr_32 = IMAGE_SAMPLE_V1_V2_gfx10 undef %27:vreg_64, [[REG_SEQUENCE7]], killed [[REG_SEQUENCE10]], 1, 1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from custom "ImageResource")
+ ; SI-NEXT: [[IMAGE_SAMPLE_V1_V2_gfx10_:%[0-9]+]]:vgpr_32 = IMAGE_SAMPLE_V1_V2_gfx10 undef %22:vreg_64, [[REG_SEQUENCE5]], killed [[REG_SEQUENCE8]], 1, 1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from custom "ImageResource")
; SI-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, killed [[S_AND_SAVEEXEC_B32_1]], implicit-def dead $scc
; SI-NEXT: SI_WATERFALL_LOOP %bb.4, implicit $exec
; SI-NEXT: {{ $}}
@@ -655,7 +650,7 @@ define protected amdgpu_kernel void @nested_waterfalls(%tex* addrspace(1)* %tex.
; SI-NEXT: {{ $}}
; SI-NEXT: bb.7:
; SI-NEXT: $exec_lo = S_MOV_B32 killed [[S_MOV_B32_]]
- ; SI-NEXT: GLOBAL_STORE_DWORD undef %30:vreg_64, killed [[IMAGE_SAMPLE_V1_V2_gfx10_]], 0, 0, implicit $exec :: (store (s32) into `float addrspace(1)* undef`, addrspace 1)
+ ; SI-NEXT: GLOBAL_STORE_DWORD undef %25:vreg_64, killed [[IMAGE_SAMPLE_V1_V2_gfx10_]], 0, 0, implicit $exec :: (store (s32) into `float addrspace(1)* undef`, addrspace 1)
; SI-NEXT: S_ENDPGM 0
entry:
%0 = tail call i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll
index 1b9f951..9fe7e86 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll
@@ -153,8 +153,9 @@ define protected amdgpu_kernel void @InferPHI(i32 %a, double addrspace(1)* %b, d
; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 1
; CHECK-NEXT: s_cbranch_scc1 .LBB5_1
; CHECK-NEXT: ; %bb.2: ; %bb1
+; CHECK-NEXT: v_mov_b32_e32 v0, s6
+; CHECK-NEXT: v_mov_b32_e32 v1, s7
; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[6:7], s[6:7] op_sel:[0,1]
; CHECK-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; CHECK-NEXT: s_endpgm
entry:
diff --git a/llvm/test/Transforms/Sink/single-succ.ll b/llvm/test/Transforms/Sink/single-succ.ll
index c347318..23f074e 100644
--- a/llvm/test/Transforms/Sink/single-succ.ll
+++ b/llvm/test/Transforms/Sink/single-succ.ll
@@ -4,13 +4,13 @@
define i32 @single_succ(i1 %b, ptr %a) {
; CHECK-LABEL: @single_succ(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT: br label [[IF:%.*]]
; CHECK: if:
; CHECK-NEXT: br i1 [[B:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
; CHECK-NEXT: ret i32 42
; CHECK: else:
+; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT: ret i32 [[L]]
;
entry: