path: root/llvm/test/CodeGen/AMDGPU
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll  20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll  7
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll  7924
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll  301
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll  631
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll  157
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll  231
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll  196
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll  332
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll  609
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll  412
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll  959
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16.ll  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll  8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll  49
-rw-r--r--  llvm/test/CodeGen/AMDGPU/call-argument-types.ll  12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ds_write2.ll  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll  63
-rw-r--r--  llvm/test/CodeGen/AMDGPU/finalizebundle.mir  52
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-saddr-store.ll  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll  100
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx11.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx12.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier-fastregalloc.ll  21
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier.ll  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.bf16.bf16.ll  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.f16.f16.ll  8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll  156
-rw-r--r--  llvm/test/CodeGen/AMDGPU/load-constant-i1.ll  18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/load-global-i16.ll  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/load-global-i32.ll  10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/load-global-i8.ll  15
-rw-r--r--  llvm/test/CodeGen/AMDGPU/load-local-i16.ll  214
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-lastuse-metadata.ll  31
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll  58
-rw-r--r--  llvm/test/CodeGen/AMDGPU/max.ll  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll  1765
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mixed-vmem-types.ll  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/packed-fp32.ll  203
-rw-r--r--  llvm/test/CodeGen/AMDGPU/postra-bundle-memops.mir  5
-rw-r--r--  llvm/test/CodeGen/AMDGPU/postra-bundle-vimage-vsample-gfx12.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll  41
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scheduler-rp-calc-one-successor-two-predecessors-bug.ll  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scratch-simple.ll  1728
-rw-r--r--  llvm/test/CodeGen/AMDGPU/soft-clause-exceeds-register-budget.ll  15
-rw-r--r--  llvm/test/CodeGen/AMDGPU/spill-agpr.ll  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll  22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/stack-realign.ll  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/twoaddr-bundle.mir  57
52 files changed, 7974 insertions, 8520 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
index 1812e17..10e83b7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll
@@ -189,15 +189,11 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
; GFX10-NEXT: v_mov_b32_e32 v2, s1
; GFX10-NEXT: s_lshr_b32 s6, s1, 16
; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: s_lshr_b32 s1, s1, 24
; GFX10-NEXT: s_lshr_b32 s8, s2, 16
-; GFX10-NEXT: s_and_b32 s9, 0xffff, s2
; GFX10-NEXT: s_lshr_b32 s5, s5, 8
; GFX10-NEXT: v_mov_b32_e32 v5, s0
; GFX10-NEXT: s_lshr_b32 s0, s7, 8
; GFX10-NEXT: v_mov_b32_e32 v6, s6
-; GFX10-NEXT: v_mov_b32_e32 v7, s1
-; GFX10-NEXT: s_lshr_b32 s1, s9, 8
; GFX10-NEXT: v_mov_b32_e32 v8, s5
; GFX10-NEXT: v_mov_b32_e32 v9, s0
; GFX10-NEXT: ds_write_b8 v1, v0
@@ -208,18 +204,22 @@ define amdgpu_kernel void @store_lds_v4i32_align1(ptr addrspace(3) %out, <4 x i3
; GFX10-NEXT: ds_write_b8 v1, v8 offset:1
; GFX10-NEXT: ds_write_b8 v1, v9 offset:5
; GFX10-NEXT: v_mov_b32_e32 v0, s8
-; GFX10-NEXT: v_mov_b32_e32 v3, s2
-; GFX10-NEXT: v_mov_b32_e32 v10, s1
+; GFX10-NEXT: s_lshr_b32 s1, s1, 24
+; GFX10-NEXT: s_and_b32 s9, 0xffff, s2
; GFX10-NEXT: s_lshr_b32 s0, s2, 24
-; GFX10-NEXT: ds_write_b8 v1, v7 offset:7
-; GFX10-NEXT: ds_write_b8 v1, v3 offset:8
-; GFX10-NEXT: ds_write_b8 v1, v10 offset:9
+; GFX10-NEXT: v_mov_b32_e32 v7, s1
+; GFX10-NEXT: s_lshr_b32 s1, s9, 8
+; GFX10-NEXT: v_mov_b32_e32 v3, s2
; GFX10-NEXT: ds_write_b8 v1, v0 offset:10
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: s_and_b32 s0, 0xffff, s3
-; GFX10-NEXT: s_lshr_b32 s1, s3, 16
+; GFX10-NEXT: v_mov_b32_e32 v10, s1
; GFX10-NEXT: s_lshr_b32 s0, s0, 8
+; GFX10-NEXT: s_lshr_b32 s1, s3, 16
; GFX10-NEXT: v_mov_b32_e32 v2, s3
+; GFX10-NEXT: ds_write_b8 v1, v7 offset:7
+; GFX10-NEXT: ds_write_b8 v1, v3 offset:8
+; GFX10-NEXT: ds_write_b8 v1, v10 offset:9
; GFX10-NEXT: v_mov_b32_e32 v3, s0
; GFX10-NEXT: s_lshr_b32 s0, s3, 24
; GFX10-NEXT: v_mov_b32_e32 v4, s1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
index b33b8a7..4a22a91 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/vni8-across-blocks.ll
@@ -272,10 +272,6 @@ define amdgpu_kernel void @v256i8_liveout(ptr addrspace(1) %src1, ptr addrspace(
; GFX906-NEXT: buffer_store_dword v6, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GFX906-NEXT: buffer_store_dword v7, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill
; GFX906-NEXT: buffer_store_dword v8, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill
-; GFX906-NEXT: global_load_dwordx4 v[5:8], v4, s[0:1] offset:16
-; GFX906-NEXT: s_nop 0
-; GFX906-NEXT: global_load_dwordx4 v[9:12], v4, s[0:1] offset:32
-; GFX906-NEXT: global_load_dwordx4 v[13:16], v4, s[0:1] offset:48
; GFX906-NEXT: global_load_dwordx4 v[17:20], v4, s[0:1] offset:64
; GFX906-NEXT: global_load_dwordx4 v[21:24], v4, s[0:1] offset:80
; GFX906-NEXT: global_load_dwordx4 v[25:28], v4, s[0:1] offset:96
@@ -288,6 +284,9 @@ define amdgpu_kernel void @v256i8_liveout(ptr addrspace(1) %src1, ptr addrspace(
; GFX906-NEXT: global_load_dwordx4 v[53:56], v4, s[0:1] offset:208
; GFX906-NEXT: global_load_dwordx4 v[57:60], v4, s[0:1] offset:224
; GFX906-NEXT: global_load_dwordx4 v[0:3], v4, s[0:1] offset:240
+; GFX906-NEXT: global_load_dwordx4 v[5:8], v4, s[0:1] offset:16
+; GFX906-NEXT: global_load_dwordx4 v[9:12], v4, s[0:1] offset:32
+; GFX906-NEXT: global_load_dwordx4 v[13:16], v4, s[0:1] offset:48
; GFX906-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX906-NEXT: s_cbranch_execz .LBB6_2
; GFX906-NEXT: ; %bb.1: ; %bb.1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
index 74552a5..08e64da 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
@@ -3105,22 +3105,6 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; SI-LABEL: bitcast_v32i32_to_v128i8:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
@@ -3253,6 +3237,22 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr36
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr45
; SI-NEXT: ; implicit-def: $vgpr43
; SI-NEXT: ; implicit-def: $vgpr41
@@ -3284,14 +3284,13 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB12_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 24
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -3523,7 +3522,6 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; SI-NEXT: s_cbranch_execz .LBB12_4
; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v31, vcc, 3, v31
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v32, vcc, 3, v32
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 24
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
@@ -4317,22 +4315,6 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; VI-LABEL: bitcast_v32i32_to_v128i8:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
@@ -4437,6 +4419,22 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; VI-NEXT: ; implicit-def: $vgpr39
; VI-NEXT: ; kill: killed $vgpr39
; VI-NEXT: ; implicit-def: $vgpr39
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr59
; VI-NEXT: ; kill: killed $vgpr39
; VI-NEXT: ; implicit-def: $vgpr39
@@ -4542,129 +4540,129 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v26
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[31:32]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v26
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v25
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[29:30]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v24
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v24
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v24
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[27:28]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v23
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v23
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v22
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[25:26]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v22
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v22
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[23:24]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v21
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v20
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[21:22]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v20
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v19
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v18
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v18
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[17:18]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v17
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v17
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v16
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[15:16]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v16
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v16
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[13:14]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v15
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v14
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[11:12]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v14
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v13
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[9:10]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v12
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v39, 24, v32
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v12
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[7:8]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; VI-NEXT: v_mov_b32_e32 v55, v39
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[5:6]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v11
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[31:32]
+; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[29:30]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[27:28]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[25:26]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[23:24]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[21:22]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[17:18]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[15:16]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[13:14]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[11:12]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[9:10]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v39, 24, v32
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[7:8]
-; VI-NEXT: v_mov_b32_e32 v55, v39
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[5:6]
-; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; VI-NEXT: v_lshrrev_b64 v[41:42], 24, v[1:2]
; VI-NEXT: v_lshrrev_b32_e32 v58, 8, v27
; VI-NEXT: v_lshrrev_b32_e32 v59, 24, v10
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v35, 8, v10
; VI-NEXT: v_lshrrev_b32_e32 v60, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v49, 8, v9
@@ -5286,6 +5284,10 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX9-LABEL: bitcast_v32i32_to_v128i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -5302,9 +5304,6 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; kill: killed $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr40
@@ -5437,7 +5436,6 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX9-NEXT: ; kill: killed $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr41
; GFX9-NEXT: ; implicit-def: $vgpr40
-; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
@@ -5493,7 +5491,7 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(29)
+; GFX9-NEXT: s_waitcnt vmcnt(45)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; GFX9-NEXT: ; implicit-def: $vgpr33
; GFX9-NEXT: ; kill: killed $vgpr33
@@ -5508,7 +5506,7 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v32
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: s_waitcnt vmcnt(47)
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v31
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v31
@@ -5520,149 +5518,147 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v30
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v29
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[31:32]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v29
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v28
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v28
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[29:30]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v28
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v27
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[27:28]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v26
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v26
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[25:26]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v25
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v24
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[23:24]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v24
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[21:22]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v23
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v22
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[19:20]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v22
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v21
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[17:18]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v20
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v20
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[15:16]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v19
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v19
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v18
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[13:14]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v18
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v17
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[11:12]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v17
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v16
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v16
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[9:10]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v15
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v14
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[7:8]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v13
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v12
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[5:6]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v11
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v10
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[31:32]
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[29:30]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[27:28]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[25:26]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[23:24]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[21:22]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[19:20]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[17:18]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[15:16]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[13:14]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[11:12]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[9:10]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[7:8]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[5:6]
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 8, v15
@@ -5670,7 +5666,9 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 8, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v52, 8, v11
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v10
+; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 8, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 24, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v8
@@ -5698,7 +5696,7 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX9-NEXT: s_cbranch_execz .LBB12_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v32, 3, v32
-; GFX9-NEXT: s_waitcnt vmcnt(28)
+; GFX9-NEXT: s_waitcnt vmcnt(44)
; GFX9-NEXT: v_add_u32_e32 v31, 3, v31
; GFX9-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32]
; GFX9-NEXT: v_add_u32_e32 v30, 3, v30
@@ -6755,7 +6753,11 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v32i32_to_v128i8:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x2
+; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:88
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:84
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:80
@@ -6776,10 +6778,6 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:20
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:12
-; GFX11-FAKE16-NEXT: s_clause 0x2
-; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
-; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr75
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr74
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
@@ -7416,7 +7414,7 @@ define <128 x i8> @bitcast_v32i32_to_v128i8(<32 x i32> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:12
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:20
@@ -10666,7 +10664,7 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_xor_saveexec_b32 s4, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v16, s32
; GFX11-NEXT: scratch_store_b32 off, v17, s32 offset:4
; GFX11-NEXT: scratch_store_b32 off, v18, s32 offset:8
@@ -11599,7 +11597,7 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_readlane_b32 s35, v16, 3
; GFX11-NEXT: v_readlane_b32 s34, v16, 2
; GFX11-NEXT: s_xor_saveexec_b32 s0, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v16, off, s32
; GFX11-NEXT: scratch_load_b32 v17, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v18, off, s32 offset:8
@@ -11812,13 +11810,26 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:208
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:188
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(4) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196
@@ -11979,44 +11990,30 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:356
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:384
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380
; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v3
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:364
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:372
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:384
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB14_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
@@ -12025,11 +12022,11 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v9, 0xff, v49
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
@@ -12632,7 +12629,6 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB14_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
@@ -12646,8 +12642,8 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v9, 0xff, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
@@ -13327,13 +13323,25 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -13470,34 +13478,20 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v3
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v0
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -13983,7 +13977,6 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_add_u16_e32 v9, 3, v61
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
@@ -14561,13 +14554,27 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(16)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -14709,34 +14716,20 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v3
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v0
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -15223,7 +15216,6 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_add_u16_e32 v9, 3, v61
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
@@ -16362,7 +16354,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v32i32:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:592
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:588
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:584
@@ -16395,7 +16387,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:476
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:472
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:468
-; GFX11-FAKE16-NEXT: s_clause 0x12
+; GFX11-FAKE16-NEXT: s_clause 0x12 ; 76-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:464
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:460
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:456
@@ -17336,7 +17328,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, v35, v36
; GFX11-FAKE16-NEXT: .LBB14_4: ; %end
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:392
; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:396
; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:400
@@ -17369,7 +17361,7 @@ define <32 x i32> @bitcast_v128i8_to_v32i32(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:508
; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:512
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:516
-; GFX11-FAKE16-NEXT: s_clause 0x12
+; GFX11-FAKE16-NEXT: s_clause 0x12 ; 76-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:520
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:524
; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:528
@@ -18086,24 +18078,13 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB15_3
; SI-NEXT: .LBB15_2:
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v55, v56
; SI-NEXT: v_mov_b32_e32 v42, v46
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
@@ -18114,10 +18095,22 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: .LBB15_3: ; %Flow
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_mov_b32_e32 v35, v57
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
@@ -18127,7 +18120,6 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
@@ -18722,13 +18714,13 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v5
; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v7
; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v9
; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v11
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v13
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v17
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
@@ -18956,11 +18948,11 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_or_b32_sdwa v0, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v4, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v2, v6, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -18970,11 +18962,8 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
@@ -18982,6 +18971,8 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -19190,12 +19181,6 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_branch .LBB15_3
; VI-NEXT: .LBB15_2:
-; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v44, v56
; VI-NEXT: v_mov_b32_e32 v41, v33
; VI-NEXT: v_mov_b32_e32 v50, v40
@@ -19213,6 +19198,12 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v54, v53
; VI-NEXT: v_mov_b32_e32 v52, v36
; VI-NEXT: v_mov_b32_e32 v49, v51
@@ -19222,7 +19213,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v51, v41
; VI-NEXT: v_mov_b32_e32 v36, v44
; VI-NEXT: v_mov_b32_e32 v53, v54
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_mov_b32_e32 v54, v60
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
@@ -19235,7 +19226,6 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: ; %bb.4: ; %cmp.true
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
@@ -19820,8 +19810,8 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v11
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v9
; GFX9-NEXT: s_waitcnt vmcnt(5)
@@ -20000,16 +19990,18 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
@@ -20036,9 +20028,8 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -20054,14 +20045,16 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
@@ -20073,10 +20066,11 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_mov_b32_e32 v61, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -20089,10 +20083,12 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
@@ -20106,17 +20102,22 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v53, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v0, v32, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v1, v51, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v58, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -20132,45 +20133,24 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v35, v62
; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(11)
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(10)
; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(9)
; GFX9-NEXT: v_or_b32_sdwa v0, v54, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(7)
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v1, v32, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_or_b32_sdwa v0, v44, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v0, v48, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v49, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v39, v31 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v40, v30
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -20221,18 +20201,6 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB15_3
; GFX9-NEXT: .LBB15_2:
-; GFX9-NEXT: v_mov_b32_e32 v38, v51
-; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v33, v43
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
@@ -20246,6 +20214,18 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v38, v51
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v35, v62
; GFX9-NEXT: v_mov_b32_e32 v36, v31
; GFX9-NEXT: v_mov_b32_e32 v40, v30
@@ -20683,7 +20663,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v128i8_to_v32i32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:476
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:472
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:468
@@ -20716,7 +20696,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:360
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:356
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:352
-; GFX11-TRUE16-NEXT: s_clause 0x7
+; GFX11-TRUE16-NEXT: s_clause 0x7 ; 32-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:348
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:344
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:340
@@ -21573,7 +21553,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: .LBB15_3: ; %end
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:320
; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:324
; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:328
@@ -21606,7 +21586,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:436
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:440
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:444
-; GFX11-TRUE16-NEXT: s_clause 0x7
+; GFX11-TRUE16-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:448
; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:452
; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:456
@@ -21624,7 +21604,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v32i32_scalar:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:476
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:472
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:468
@@ -21657,7 +21637,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:360
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:356
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:352
-; GFX11-FAKE16-NEXT: s_clause 0x7
+; GFX11-FAKE16-NEXT: s_clause 0x7 ; 32-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:348
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:344
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:340
@@ -22514,7 +22494,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: .LBB15_3: ; %end
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:320
; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:324
; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:328
@@ -22547,7 +22527,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:436
; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:440
; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:444
-; GFX11-FAKE16-NEXT: s_clause 0x7
+; GFX11-FAKE16-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:448
; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:452
; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:456
@@ -26129,7 +26109,10 @@ define <32 x i32> @bitcast_v64bf16_to_v32i32(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v32i32:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0x1
+; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:60
@@ -26146,9 +26129,6 @@ define <32 x i32> @bitcast_v64bf16_to_v32i32(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:16
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:8
-; GFX11-TRUE16-NEXT: s_clause 0x1
-; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v32
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
@@ -26714,7 +26694,7 @@ define <32 x i32> @bitcast_v64bf16_to_v32i32(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v60 :: v_dual_mov_b32 v29, v61
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v62 :: v_dual_mov_b32 v31, v63
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:8
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:16
@@ -29181,7 +29161,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
@@ -29214,7 +29194,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
@@ -29247,7 +29227,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
-; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: s_clause 0x6 ; 28-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
@@ -30049,7 +30029,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
@@ -30082,7 +30062,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
@@ -30115,7 +30095,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: s_clause 0x6 ; 28-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
@@ -30155,7 +30135,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
@@ -30188,7 +30168,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
@@ -30221,7 +30201,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: s_clause 0x8 ; 36-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
@@ -30913,7 +30893,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
@@ -30946,7 +30926,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
@@ -30979,7 +30959,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: s_clause 0x8 ; 36-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
@@ -34732,7 +34712,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
@@ -34765,7 +34745,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
@@ -34798,7 +34778,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
@@ -34876,7 +34856,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v185, off, s32
; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
@@ -34909,7 +34889,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
@@ -34942,7 +34922,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
@@ -35000,6 +34980,10 @@ define <64 x i16> @bitcast_v32i32_to_v64i16(<32 x i32> %a, i32 %b) {
; SI-LABEL: bitcast_v32i32_to_v64i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
+; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -35016,10 +35000,6 @@ define <64 x i16> @bitcast_v32i32_to_v64i16(<32 x i32> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
-; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; implicit-def: $vgpr60
; SI-NEXT: ; implicit-def: $vgpr58
; SI-NEXT: ; implicit-def: $vgpr63
@@ -35051,14 +35031,13 @@ define <64 x i16> @bitcast_v32i32_to_v64i16(<32 x i32> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; kill: killed $vgpr39
; SI-NEXT: ; implicit-def: $vgpr39
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB24_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 16
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: v_alignbit_b32 v34, v30, v29, 16
@@ -35103,7 +35082,6 @@ define <64 x i16> @bitcast_v32i32_to_v64i16(<32 x i32> %a, i32 %b) {
; SI-NEXT: s_cbranch_execz .LBB24_4
; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v31, vcc, 3, v31
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v32, vcc, 3, v32
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
@@ -35356,7 +35334,7 @@ define <64 x i16> @bitcast_v32i32_to_v64i16(<32 x i32> %a, i32 %b) {
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
@@ -36338,7 +36316,13 @@ define <32 x i32> @bitcast_v64i16_to_v32i32(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v33
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
@@ -36370,12 +36354,6 @@ define <32 x i32> @bitcast_v64i16_to_v32i32(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:100
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -36391,7 +36369,6 @@ define <32 x i32> @bitcast_v64i16_to_v32i32(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v41
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
@@ -36608,7 +36585,6 @@ define <32 x i32> @bitcast_v64i16_to_v32i32(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v41
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
@@ -37782,7 +37758,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
@@ -37815,7 +37791,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
@@ -37848,7 +37824,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
@@ -37926,7 +37902,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v185, off, s32
; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
@@ -37959,7 +37935,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
@@ -37992,7 +37968,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
@@ -40033,22 +40009,6 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; SI-LABEL: bitcast_v32f32_to_v128i8:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
@@ -40181,6 +40141,22 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr36
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr45
; SI-NEXT: ; implicit-def: $vgpr43
; SI-NEXT: ; implicit-def: $vgpr41
@@ -40212,14 +40188,13 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB36_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 24
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -40451,7 +40426,6 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; SI-NEXT: s_cbranch_execz .LBB36_4
; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v31, 1.0, v31
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_f32_e32 v32, 1.0, v32
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 24
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
@@ -41245,22 +41219,6 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; VI-LABEL: bitcast_v32f32_to_v128i8:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
@@ -41365,6 +41323,22 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; VI-NEXT: ; implicit-def: $vgpr39
; VI-NEXT: ; kill: killed $vgpr39
; VI-NEXT: ; implicit-def: $vgpr39
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr59
; VI-NEXT: ; kill: killed $vgpr39
; VI-NEXT: ; implicit-def: $vgpr39
@@ -41470,129 +41444,129 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v26
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[31:32]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v26
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v25
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[29:30]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v24
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v24
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v24
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[27:28]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v23
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v23
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v22
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[25:26]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v22
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v22
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[23:24]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v21
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v20
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[21:22]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v20
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v19
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v18
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v18
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[17:18]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v17
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v17
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v16
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[15:16]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v16
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v16
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[13:14]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v15
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v14
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[11:12]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v14
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v13
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[9:10]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v12
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v39, 24, v32
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v12
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[7:8]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; VI-NEXT: v_mov_b32_e32 v55, v39
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[5:6]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v11
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[31:32]
+; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[29:30]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[27:28]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[25:26]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[23:24]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[21:22]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[17:18]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[15:16]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[13:14]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[11:12]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[9:10]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v39, 24, v32
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[7:8]
-; VI-NEXT: v_mov_b32_e32 v55, v39
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[5:6]
-; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; VI-NEXT: v_lshrrev_b64 v[41:42], 24, v[1:2]
; VI-NEXT: v_lshrrev_b32_e32 v58, 8, v27
; VI-NEXT: v_lshrrev_b32_e32 v59, 24, v10
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v35, 8, v10
; VI-NEXT: v_lshrrev_b32_e32 v60, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v49, 8, v9
@@ -42214,6 +42188,10 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX9-LABEL: bitcast_v32f32_to_v128i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -42230,9 +42208,6 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; kill: killed $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr40
@@ -42365,7 +42340,6 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX9-NEXT: ; kill: killed $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr41
; GFX9-NEXT: ; implicit-def: $vgpr40
-; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
@@ -42421,7 +42395,7 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(29)
+; GFX9-NEXT: s_waitcnt vmcnt(45)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; GFX9-NEXT: ; implicit-def: $vgpr33
; GFX9-NEXT: ; kill: killed $vgpr33
@@ -42436,7 +42410,7 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v32
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: s_waitcnt vmcnt(47)
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v31
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v31
@@ -42448,149 +42422,147 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v30
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v29
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[31:32]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v29
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v28
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v28
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[29:30]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v28
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v27
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[27:28]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v26
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v26
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[25:26]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v25
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v24
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[23:24]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v24
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[21:22]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v23
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v22
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[19:20]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v22
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v21
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[17:18]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v20
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v20
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[15:16]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v19
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v19
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v18
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[13:14]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v18
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v17
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[11:12]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v17
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v16
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v16
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[9:10]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v15
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v14
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[7:8]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v13
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v12
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[5:6]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v11
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v10
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[31:32]
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[29:30]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[27:28]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[25:26]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[23:24]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[21:22]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[19:20]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[17:18]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[15:16]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[13:14]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[11:12]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[9:10]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[7:8]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[5:6]
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 8, v15
@@ -42598,7 +42570,9 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 8, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v52, 8, v11
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v10
+; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 8, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 24, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v8
@@ -42626,7 +42600,7 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX9-NEXT: s_cbranch_execz .LBB36_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v32, 1.0, v32
-; GFX9-NEXT: s_waitcnt vmcnt(28)
+; GFX9-NEXT: s_waitcnt vmcnt(44)
; GFX9-NEXT: v_add_f32_e32 v31, 1.0, v31
; GFX9-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32]
; GFX9-NEXT: v_add_f32_e32 v30, 1.0, v30
@@ -43666,7 +43640,11 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v32f32_to_v128i8:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x2
+; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:88
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:84
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:80
@@ -43687,10 +43665,6 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:20
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:12
-; GFX11-FAKE16-NEXT: s_clause 0x2
-; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
-; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr75
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr74
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
@@ -44310,7 +44284,7 @@ define <128 x i8> @bitcast_v32f32_to_v128i8(<32 x float> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:12
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:20
@@ -44770,27 +44744,11 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 24
-; SI-NEXT: v_lshr_b64 v[17:18], v[48:49], 8
-; SI-NEXT: v_add_f32_e64 v53, s23, 1.0
-; SI-NEXT: v_add_f32_e64 v52, s22, 1.0
; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 24
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 16
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 8
; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v2
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v2
@@ -44842,24 +44800,33 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v12
+; SI-NEXT: v_lshr_b64 v[17:18], v[48:49], 8
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v16
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v16
+; SI-NEXT: v_add_f32_e64 v53, s23, 1.0
+; SI-NEXT: v_add_f32_e64 v52, s22, 1.0
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v16
+; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 24
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v21
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v21
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v21
+; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 16
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v26
@@ -44868,6 +44835,8 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v26
; SI-NEXT: v_add_f32_e64 v41, s21, 1.0
; SI-NEXT: v_add_f32_e64 v40, s20, 1.0
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v26
@@ -44875,6 +44844,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v57, s16, 1.0
; SI-NEXT: v_add_f32_e64 v46, s19, 1.0
; SI-NEXT: v_add_f32_e64 v45, s18, 1.0
+; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 8
; SI-NEXT: v_lshr_b64 v[31:32], v[40:41], 16
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -44885,6 +44855,8 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v30
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
; SI-NEXT: v_lshr_b64 v[27:28], v[40:41], 24
; SI-NEXT: v_lshr_b64 v[33:34], v[45:46], 24
; SI-NEXT: v_lshr_b64 v[38:39], v[45:46], 8
@@ -45408,33 +45380,33 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v13, s98
+; SI-NEXT: v_mov_b32_e32 v27, s62
; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v13, s46
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v13, s56
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v13, s58
-; SI-NEXT: v_mov_b32_e32 v27, s62
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: v_mov_b32_e32 v13, s46
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v27, s72
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: v_mov_b32_e32 v13, s56
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v27, s74
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: v_mov_b32_e32 v13, s58
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v27, s76
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v57, s16
@@ -45468,6 +45440,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v3, s6
; SI-NEXT: v_mov_b32_e32 v4, s7
; SI-NEXT: v_readlane_b32 s5, v61, 1
+; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_mov_b32_e32 v13, s60
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v27, s78
@@ -45809,17 +45782,16 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xff, v15
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13
; SI-NEXT: v_or_b32_e32 v13, v14, v13
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v17
; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v14, 0xff, v14
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v17
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_or_b32_e32 v13, v13, v14
@@ -46687,6 +46659,10 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: s_branch .LBB37_2
; VI-NEXT: .LBB37_4:
+; VI-NEXT: v_mov_b32_e32 v53, s46
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s56
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: v_readlane_b32 s4, v62, 0
; VI-NEXT: v_mov_b32_e32 v48, s4
@@ -46764,6 +46740,9 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v39, s4
; VI-NEXT: v_readlane_b32 s4, v62, 26
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s58
; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v39, s4
; VI-NEXT: v_readlane_b32 s4, v62, 27
@@ -46841,6 +46820,9 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: v_readlane_b32 s4, v62, 51
; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v39, s4
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s60
; VI-NEXT: v_readlane_b32 s4, v62, 52
; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v39, s4
@@ -46859,40 +46841,6 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: v_readlane_b32 s4, v62, 57
; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v39, s4
-; VI-NEXT: v_mov_b32_e32 v53, s46
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s56
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s58
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s60
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s62
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s72
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s74
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s76
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s78
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s88
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v53, s90
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v31, s16
; VI-NEXT: v_mov_b32_e32 v32, s17
; VI-NEXT: v_mov_b32_e32 v29, s18
@@ -46946,11 +46894,35 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v42, s82
; VI-NEXT: v_mov_b32_e32 v37, s81
; VI-NEXT: v_mov_b32_e32 v50, s80
-; VI-NEXT: v_mov_b32_e32 v53, s30
-; VI-NEXT: v_mov_b32_e32 v54, s34
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v39, s36
; VI-NEXT: v_mov_b32_e32 v40, s38
; VI-NEXT: v_mov_b32_e32 v41, s48
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s62
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s72
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s74
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s76
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s78
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s88
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s90
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v53, s30
+; VI-NEXT: v_mov_b32_e32 v54, s34
; VI-NEXT: .LBB37_5: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v34, 8, v34
; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v35
@@ -48123,10 +48095,8 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_readlane_b32 s4, v62, 49
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v40, s4
-; GFX9-NEXT: v_mov_b32_e32 v49, s52
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v40, s46
-; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -48175,6 +48145,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v40, s94
+; GFX9-NEXT: v_mov_b32_e32 v49, s52
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
@@ -48222,6 +48193,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v54, s55
; GFX9-NEXT: v_mov_b32_e32 v50, s53
; GFX9-NEXT: v_mov_b32_e32 v60, s54
+; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v49, s51
; GFX9-NEXT: v_mov_b32_e32 v59, s50
; GFX9-NEXT: v_mov_b32_e32 v58, s49
@@ -48646,7 +48618,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_or_saveexec_b32 s4, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:76
; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:80
; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:84
@@ -48681,7 +48653,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_writelane_b32 v76, s101, 5
; GFX11-NEXT: s_mov_b32 vcc_hi, 0
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
-; GFX11-NEXT: s_clause 0x12
+; GFX11-NEXT: s_clause 0x12 ; 76-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:72
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:68
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:64
@@ -49601,7 +49573,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: scratch_store_b128 v0, v[11:14], off offset:80
; GFX11-NEXT: scratch_store_b128 v0, v[7:10], off offset:96
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
-; GFX11-NEXT: s_clause 0x12
+; GFX11-NEXT: s_clause 0x12 ; 76-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v74, off, s32
; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:8
@@ -49663,7 +49635,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_readlane_b32 s31, v75, 1
; GFX11-NEXT: v_readlane_b32 s30, v75, 0
; GFX11-NEXT: s_or_saveexec_b32 s0, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:76
; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:80
; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:84
@@ -49876,13 +49848,26 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:208
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:188
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(4) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196
@@ -50043,44 +50028,30 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:356
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:384
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380
; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v3
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:364
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:372
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:384
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB38_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
@@ -50089,11 +50060,11 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v9, 0xff, v49
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
@@ -50696,7 +50667,6 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB38_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
@@ -50710,8 +50680,8 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v9, 0xff, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
@@ -51391,13 +51361,25 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -51534,34 +51516,20 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v3
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v0
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -52047,7 +52015,6 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_add_u16_e32 v9, 3, v61
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
@@ -52625,13 +52592,27 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(16)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -52773,34 +52754,20 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v3
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v0
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -53287,7 +53254,6 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_add_u16_e32 v9, 3, v61
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
@@ -54426,7 +54392,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v32f32:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:592
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:588
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:584
@@ -54459,7 +54425,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:476
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:472
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:468
-; GFX11-FAKE16-NEXT: s_clause 0x12
+; GFX11-FAKE16-NEXT: s_clause 0x12 ; 76-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:464
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:460
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:456
@@ -55400,7 +55366,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, v35, v36
; GFX11-FAKE16-NEXT: .LBB38_4: ; %end
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:392
; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:396
; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:400
@@ -55433,7 +55399,7 @@ define <32 x float> @bitcast_v128i8_to_v32f32(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:508
; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:512
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:516
-; GFX11-FAKE16-NEXT: s_clause 0x12
+; GFX11-FAKE16-NEXT: s_clause 0x12 ; 76-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:520
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:524
; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:528
@@ -56150,24 +56116,13 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB39_3
; SI-NEXT: .LBB39_2:
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v55, v56
; SI-NEXT: v_mov_b32_e32 v42, v46
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
@@ -56178,10 +56133,22 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: .LBB39_3: ; %Flow
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_mov_b32_e32 v35, v57
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
@@ -56191,7 +56158,6 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
@@ -56786,13 +56752,13 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v5
; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v7
; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v9
; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v11
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v13
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v17
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
@@ -57020,11 +56986,11 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_or_b32_sdwa v0, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v4, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v2, v6, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -57034,11 +57000,8 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
@@ -57046,6 +57009,8 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -57254,12 +57219,6 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_branch .LBB39_3
; VI-NEXT: .LBB39_2:
-; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v44, v56
; VI-NEXT: v_mov_b32_e32 v41, v33
; VI-NEXT: v_mov_b32_e32 v50, v40
@@ -57277,6 +57236,12 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v54, v53
; VI-NEXT: v_mov_b32_e32 v52, v36
; VI-NEXT: v_mov_b32_e32 v49, v51
@@ -57286,7 +57251,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: v_mov_b32_e32 v51, v41
; VI-NEXT: v_mov_b32_e32 v36, v44
; VI-NEXT: v_mov_b32_e32 v53, v54
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_mov_b32_e32 v54, v60
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
@@ -57299,7 +57264,6 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: ; %bb.4: ; %cmp.true
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
@@ -57884,8 +57848,8 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v11
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v9
; GFX9-NEXT: s_waitcnt vmcnt(5)
@@ -58064,16 +58028,18 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
@@ -58100,9 +58066,8 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -58118,14 +58083,16 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
@@ -58137,10 +58104,11 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_mov_b32_e32 v61, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -58153,10 +58121,12 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
@@ -58170,17 +58140,22 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v53, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v0, v32, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v1, v51, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v58, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -58196,45 +58171,24 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v35, v62
; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(11)
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(10)
; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(9)
; GFX9-NEXT: v_or_b32_sdwa v0, v54, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(7)
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v1, v32, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_or_b32_sdwa v0, v44, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v0, v48, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v49, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v39, v31 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v40, v30
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -58285,18 +58239,6 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB39_3
; GFX9-NEXT: .LBB39_2:
-; GFX9-NEXT: v_mov_b32_e32 v38, v51
-; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v33, v43
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
@@ -58310,6 +58252,18 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v38, v51
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v35, v62
; GFX9-NEXT: v_mov_b32_e32 v36, v31
; GFX9-NEXT: v_mov_b32_e32 v40, v30
@@ -58747,7 +58701,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v128i8_to_v32f32_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:476
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:472
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:468
@@ -58780,7 +58734,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:360
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:356
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:352
-; GFX11-TRUE16-NEXT: s_clause 0x7
+; GFX11-TRUE16-NEXT: s_clause 0x7 ; 32-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:348
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:344
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:340
@@ -59637,7 +59591,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: .LBB39_3: ; %end
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:320
; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:324
; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:328
@@ -59670,7 +59624,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:436
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:440
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:444
-; GFX11-TRUE16-NEXT: s_clause 0x7
+; GFX11-TRUE16-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:448
; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:452
; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:456
@@ -59688,7 +59642,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v32f32_scalar:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:476
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:472
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:468
@@ -59721,7 +59675,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:360
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:356
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:352
-; GFX11-FAKE16-NEXT: s_clause 0x7
+; GFX11-FAKE16-NEXT: s_clause 0x7 ; 32-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:348
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:344
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:340
@@ -60578,7 +60532,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: .LBB39_3: ; %end
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:320
; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:324
; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:328
@@ -60611,7 +60565,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:436
; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:440
; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:444
-; GFX11-FAKE16-NEXT: s_clause 0x7
+; GFX11-FAKE16-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:448
; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:452
; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:456
@@ -64239,7 +64193,10 @@ define <32 x float> @bitcast_v64bf16_to_v32f32(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v32f32:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0x1
+; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:60
@@ -64256,9 +64213,6 @@ define <32 x float> @bitcast_v64bf16_to_v32f32(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:16
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:8
-; GFX11-TRUE16-NEXT: s_clause 0x1
-; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v32
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
@@ -64824,7 +64778,7 @@ define <32 x float> @bitcast_v64bf16_to_v32f32(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v60 :: v_dual_mov_b32 v29, v61
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v62 :: v_dual_mov_b32 v31, v63
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:8
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:16
@@ -67291,7 +67245,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
@@ -67324,7 +67278,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
@@ -67357,7 +67311,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
-; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: s_clause 0x6 ; 28-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
@@ -68159,7 +68113,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
@@ -68192,7 +68146,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
@@ -68225,7 +68179,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: s_clause 0x6 ; 28-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
@@ -68265,7 +68219,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
@@ -68298,7 +68252,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
@@ -68331,7 +68285,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: s_clause 0x8 ; 36-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
@@ -69023,7 +68977,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
@@ -69056,7 +69010,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
@@ -69089,7 +69043,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: s_clause 0x8 ; 36-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
@@ -72813,7 +72767,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
@@ -72846,7 +72800,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
@@ -72879,7 +72833,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
@@ -72957,7 +72911,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v185, off, s32
; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
@@ -72990,7 +72944,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
@@ -73023,7 +72977,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
@@ -73081,6 +73035,10 @@ define <64 x i16> @bitcast_v32f32_to_v64i16(<32 x float> %a, i32 %b) {
; SI-LABEL: bitcast_v32f32_to_v64i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
+; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -73097,10 +73055,6 @@ define <64 x i16> @bitcast_v32f32_to_v64i16(<32 x float> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
-; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; implicit-def: $vgpr60
; SI-NEXT: ; implicit-def: $vgpr58
; SI-NEXT: ; implicit-def: $vgpr63
@@ -73132,14 +73086,13 @@ define <64 x i16> @bitcast_v32f32_to_v64i16(<32 x float> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; kill: killed $vgpr39
; SI-NEXT: ; implicit-def: $vgpr39
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB48_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 16
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: v_alignbit_b32 v34, v30, v29, 16
@@ -73184,7 +73137,6 @@ define <64 x i16> @bitcast_v32f32_to_v64i16(<32 x float> %a, i32 %b) {
; SI-NEXT: s_cbranch_execz .LBB48_4
; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v31, 1.0, v31
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_f32_e32 v32, 1.0, v32
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
@@ -73437,7 +73389,7 @@ define <64 x i16> @bitcast_v32f32_to_v64i16(<32 x float> %a, i32 %b) {
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
@@ -74373,7 +74325,13 @@ define <32 x float> @bitcast_v64i16_to_v32f32(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v33
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
@@ -74405,12 +74363,6 @@ define <32 x float> @bitcast_v64i16_to_v32f32(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:100
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -74426,7 +74378,6 @@ define <32 x float> @bitcast_v64i16_to_v32f32(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v41
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
@@ -74643,7 +74594,6 @@ define <32 x float> @bitcast_v64i16_to_v32f32(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v41
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
@@ -75817,7 +75767,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
@@ -75850,7 +75800,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
@@ -75883,7 +75833,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
@@ -75961,7 +75911,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v185, off, s32
; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
@@ -75994,7 +75944,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
@@ -76027,7 +75977,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
@@ -77054,22 +77004,6 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; SI-LABEL: bitcast_v16i64_to_v128i8:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
@@ -77202,6 +77136,22 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr36
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr45
; SI-NEXT: ; implicit-def: $vgpr43
; SI-NEXT: ; implicit-def: $vgpr41
@@ -77233,14 +77183,13 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB56_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 24
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -77501,7 +77450,6 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; SI-NEXT: v_addc_u32_e32 v28, vcc, 0, v28, vcc
; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29
; SI-NEXT: v_addc_u32_e32 v30, vcc, 0, v30, vcc
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v32, vcc, 3, v32
; SI-NEXT: v_addc_u32_e32 v31, vcc, 0, v31, vcc
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 24
@@ -78266,22 +78214,6 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; VI-LABEL: bitcast_v16i64_to_v128i8:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
@@ -78386,6 +78318,22 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; VI-NEXT: ; implicit-def: $vgpr39
; VI-NEXT: ; kill: killed $vgpr39
; VI-NEXT: ; implicit-def: $vgpr39
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr59
; VI-NEXT: ; kill: killed $vgpr39
; VI-NEXT: ; implicit-def: $vgpr39
@@ -78491,129 +78439,129 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v26
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[31:32]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v26
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v25
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[29:30]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v24
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v24
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v24
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[27:28]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v23
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v23
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v22
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[25:26]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v22
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v22
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[23:24]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v21
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v20
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[21:22]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v20
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v19
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v18
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v18
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[17:18]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v17
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v17
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v16
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[15:16]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v16
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v16
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[13:14]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v15
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v14
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[11:12]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v14
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v13
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[9:10]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v12
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v39, 24, v32
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v12
+; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[7:8]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; VI-NEXT: v_mov_b32_e32 v55, v39
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[5:6]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v11
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[31:32]
+; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[29:30]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[27:28]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[25:26]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[23:24]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[21:22]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[17:18]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[15:16]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[13:14]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[11:12]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[9:10]
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v39, 24, v32
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[7:8]
-; VI-NEXT: v_mov_b32_e32 v55, v39
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[5:6]
-; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; VI-NEXT: v_lshrrev_b64 v[41:42], 24, v[1:2]
; VI-NEXT: v_lshrrev_b32_e32 v58, 8, v27
; VI-NEXT: v_lshrrev_b32_e32 v59, 24, v10
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v35, 8, v10
; VI-NEXT: v_lshrrev_b32_e32 v60, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v49, 8, v9
@@ -79235,6 +79183,10 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX9-LABEL: bitcast_v16i64_to_v128i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -79251,9 +79203,6 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; kill: killed $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr40
@@ -79386,7 +79335,6 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX9-NEXT: ; kill: killed $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr41
; GFX9-NEXT: ; implicit-def: $vgpr40
-; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
@@ -79442,7 +79390,7 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(29)
+; GFX9-NEXT: s_waitcnt vmcnt(45)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; GFX9-NEXT: ; implicit-def: $vgpr33
; GFX9-NEXT: ; kill: killed $vgpr33
@@ -79457,7 +79405,7 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v32
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: s_waitcnt vmcnt(47)
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v31
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v31
@@ -79469,149 +79417,147 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v30
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v29
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[31:32]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v29
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v28
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v28
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[29:30]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v28
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v27
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[27:28]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v26
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v26
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[25:26]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v25
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v24
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[23:24]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v24
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[21:22]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v23
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v22
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[19:20]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v22
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v21
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[17:18]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v20
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v20
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v20
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[15:16]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v19
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v19
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v18
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[13:14]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v18
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v17
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[11:12]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v17
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v16
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v16
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[9:10]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v15
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v14
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[7:8]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v13
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v12
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[5:6]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v11
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v10
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[31:32]
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[29:30]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[27:28]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[25:26]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[23:24]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[21:22]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[19:20]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[17:18]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[15:16]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[13:14]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[11:12]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[9:10]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[7:8]
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[5:6]
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v10
+; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 8, v15
@@ -79619,7 +79565,9 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 8, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v52, 8, v11
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v10
+; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 8, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 24, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v8
@@ -79676,7 +79624,7 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX9-NEXT: v_addc_co_u32_e32 v28, vcc, 0, v28, vcc
; GFX9-NEXT: v_add_co_u32_e32 v29, vcc, 3, v29
; GFX9-NEXT: v_addc_co_u32_e32 v30, vcc, 0, v30, vcc
-; GFX9-NEXT: s_waitcnt vmcnt(28)
+; GFX9-NEXT: s_waitcnt vmcnt(44)
; GFX9-NEXT: v_add_co_u32_e32 v31, vcc, 3, v31
; GFX9-NEXT: v_addc_co_u32_e32 v32, vcc, 0, v32, vcc
; GFX9-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32]
@@ -80712,7 +80660,11 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v16i64_to_v128i8:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x2
+; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:88
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:84
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:80
@@ -80733,10 +80685,6 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:20
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:12
-; GFX11-FAKE16-NEXT: s_clause 0x2
-; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
-; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr75
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr74
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
@@ -81381,7 +81329,7 @@ define <128 x i8> @bitcast_v16i64_to_v128i8(<16 x i64> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:12
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:20
@@ -84631,7 +84579,7 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_xor_saveexec_b32 s4, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v16, s32
; GFX11-NEXT: scratch_store_b32 off, v17, s32 offset:4
; GFX11-NEXT: scratch_store_b32 off, v18, s32 offset:8
@@ -85566,7 +85514,7 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_readlane_b32 s35, v16, 3
; GFX11-NEXT: v_readlane_b32 s34, v16, 2
; GFX11-NEXT: s_xor_saveexec_b32 s0, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v16, off, s32
; GFX11-NEXT: scratch_load_b32 v17, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v18, off, s32 offset:8
@@ -85779,13 +85727,26 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:208
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:188
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(4) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196
@@ -85946,44 +85907,30 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:356
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:384
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380
; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v3
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:364
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:372
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:384
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB58_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
@@ -85992,11 +85939,11 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v9, 0xff, v49
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
@@ -86599,7 +86546,6 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB58_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
@@ -86613,8 +86559,8 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v9, 0xff, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
@@ -87294,13 +87240,25 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -87437,34 +87395,20 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v3
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v0
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -87950,7 +87894,6 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_add_u16_e32 v9, 3, v61
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
@@ -88528,13 +88471,27 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(16)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -88676,34 +88633,20 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v3
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v0
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -89190,7 +89133,6 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_add_u16_e32 v9, 3, v61
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
@@ -90329,7 +90271,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v16i64:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:592
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:588
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:584
@@ -90362,7 +90304,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:476
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:472
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:468
-; GFX11-FAKE16-NEXT: s_clause 0x12
+; GFX11-FAKE16-NEXT: s_clause 0x12 ; 76-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:464
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:460
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:456
@@ -91303,7 +91245,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, v35, v36
; GFX11-FAKE16-NEXT: .LBB58_4: ; %end
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:392
; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:396
; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:400
@@ -91336,7 +91278,7 @@ define <16 x i64> @bitcast_v128i8_to_v16i64(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:508
; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:512
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:516
-; GFX11-FAKE16-NEXT: s_clause 0x12
+; GFX11-FAKE16-NEXT: s_clause 0x12 ; 76-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:520
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:524
; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:528
@@ -92053,24 +91995,13 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB59_3
; SI-NEXT: .LBB59_2:
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v55, v56
; SI-NEXT: v_mov_b32_e32 v42, v46
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
@@ -92081,10 +92012,22 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: .LBB59_3: ; %Flow
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_mov_b32_e32 v35, v57
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
@@ -92094,7 +92037,6 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
@@ -92689,13 +92631,13 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v5
; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v7
; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v9
; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v11
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v13
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v17
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
@@ -92923,11 +92865,11 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_or_b32_sdwa v0, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v4, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v2, v6, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -92937,11 +92879,8 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
@@ -92949,6 +92888,8 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -93157,12 +93098,6 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_branch .LBB59_3
; VI-NEXT: .LBB59_2:
-; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v44, v56
; VI-NEXT: v_mov_b32_e32 v41, v33
; VI-NEXT: v_mov_b32_e32 v50, v40
@@ -93180,6 +93115,12 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v54, v53
; VI-NEXT: v_mov_b32_e32 v52, v36
; VI-NEXT: v_mov_b32_e32 v49, v51
@@ -93189,7 +93130,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v51, v41
; VI-NEXT: v_mov_b32_e32 v36, v44
; VI-NEXT: v_mov_b32_e32 v53, v54
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_mov_b32_e32 v54, v60
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
@@ -93202,7 +93143,6 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: ; %bb.4: ; %cmp.true
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
@@ -93787,8 +93727,8 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v11
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v9
; GFX9-NEXT: s_waitcnt vmcnt(5)
@@ -93967,16 +93907,18 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
@@ -94003,9 +93945,8 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -94021,14 +93962,16 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
@@ -94040,10 +93983,11 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_mov_b32_e32 v61, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -94056,10 +94000,12 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
@@ -94073,17 +94019,22 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v53, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v0, v32, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v1, v51, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v58, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -94099,45 +94050,24 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v35, v62
; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(11)
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(10)
; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(9)
; GFX9-NEXT: v_or_b32_sdwa v0, v54, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(7)
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v1, v32, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_or_b32_sdwa v0, v44, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v0, v48, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v49, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v39, v31 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v40, v30
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -94188,18 +94118,6 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB59_3
; GFX9-NEXT: .LBB59_2:
-; GFX9-NEXT: v_mov_b32_e32 v38, v51
-; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v33, v43
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
@@ -94213,6 +94131,18 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v38, v51
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v35, v62
; GFX9-NEXT: v_mov_b32_e32 v36, v31
; GFX9-NEXT: v_mov_b32_e32 v40, v30
@@ -94650,7 +94580,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v128i8_to_v16i64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:476
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:472
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:468
@@ -94683,7 +94613,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:360
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:356
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:352
-; GFX11-TRUE16-NEXT: s_clause 0x7
+; GFX11-TRUE16-NEXT: s_clause 0x7 ; 32-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:348
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:344
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:340
@@ -95540,7 +95470,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: .LBB59_3: ; %end
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:320
; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:324
; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:328
@@ -95573,7 +95503,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:436
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:440
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:444
-; GFX11-TRUE16-NEXT: s_clause 0x7
+; GFX11-TRUE16-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:448
; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:452
; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:456
@@ -95591,7 +95521,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v16i64_scalar:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:476
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:472
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:468
@@ -95624,7 +95554,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:360
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:356
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:352
-; GFX11-FAKE16-NEXT: s_clause 0x7
+; GFX11-FAKE16-NEXT: s_clause 0x7 ; 32-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:348
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:344
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:340
@@ -96481,7 +96411,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: .LBB59_3: ; %end
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:320
; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:324
; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:328
@@ -96514,7 +96444,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:436
; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:440
; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:444
-; GFX11-FAKE16-NEXT: s_clause 0x7
+; GFX11-FAKE16-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:448
; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:452
; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:456
@@ -100084,7 +100014,10 @@ define <16 x i64> @bitcast_v64bf16_to_v16i64(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v16i64:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0x1
+; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:60
@@ -100101,9 +100034,6 @@ define <16 x i64> @bitcast_v64bf16_to_v16i64(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:16
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:8
-; GFX11-TRUE16-NEXT: s_clause 0x1
-; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v32
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
@@ -100669,7 +100599,7 @@ define <16 x i64> @bitcast_v64bf16_to_v16i64(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v60 :: v_dual_mov_b32 v29, v61
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v62 :: v_dual_mov_b32 v31, v63
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:8
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:16
@@ -103136,7 +103066,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
@@ -103169,7 +103099,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
@@ -103202,7 +103132,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
-; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: s_clause 0x6 ; 28-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
@@ -104004,7 +103934,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
@@ -104037,7 +103967,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
@@ -104070,7 +104000,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: s_clause 0x6 ; 28-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
@@ -104110,7 +104040,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
@@ -104143,7 +104073,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
@@ -104176,7 +104106,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: s_clause 0x8 ; 36-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
@@ -104868,7 +104798,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
@@ -104901,7 +104831,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
@@ -104934,7 +104864,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: s_clause 0x8 ; 36-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
@@ -108700,7 +108630,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
@@ -108733,7 +108663,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
@@ -108766,7 +108696,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
@@ -108844,7 +108774,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v185, off, s32
; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
@@ -108877,7 +108807,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
@@ -108910,7 +108840,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
@@ -108968,6 +108898,10 @@ define <64 x i16> @bitcast_v16i64_to_v64i16(<16 x i64> %a, i32 %b) {
; SI-LABEL: bitcast_v16i64_to_v64i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
+; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -108984,10 +108918,6 @@ define <64 x i16> @bitcast_v16i64_to_v64i16(<16 x i64> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
-; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr60
; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $vgpr63
@@ -109019,14 +108949,13 @@ define <64 x i16> @bitcast_v16i64_to_v64i16(<16 x i64> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr50
; SI-NEXT: ; kill: killed $vgpr48
; SI-NEXT: ; implicit-def: $vgpr48
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB68_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 16
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: v_alignbit_b32 v34, v30, v29, 16
@@ -109099,7 +109028,6 @@ define <64 x i16> @bitcast_v16i64_to_v64i16(<16 x i64> %a, i32 %b) {
; SI-NEXT: v_addc_u32_e32 v28, vcc, 0, v28, vcc
; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29
; SI-NEXT: v_addc_u32_e32 v30, vcc, 0, v30, vcc
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v32, vcc, 3, v32
; SI-NEXT: v_addc_u32_e32 v31, vcc, 0, v31, vcc
; SI-NEXT: v_alignbit_b32 v33, v31, v32, 16
@@ -109322,7 +109250,7 @@ define <64 x i16> @bitcast_v16i64_to_v64i16(<16 x i64> %a, i32 %b) {
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
@@ -110320,7 +110248,13 @@ define <16 x i64> @bitcast_v64i16_to_v16i64(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v33
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
@@ -110352,12 +110286,6 @@ define <16 x i64> @bitcast_v64i16_to_v16i64(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:100
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -110373,7 +110301,6 @@ define <16 x i64> @bitcast_v64i16_to_v16i64(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v41
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
@@ -110590,7 +110517,6 @@ define <16 x i64> @bitcast_v64i16_to_v16i64(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v41
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
@@ -111764,7 +111690,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
@@ -111797,7 +111723,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
@@ -111830,7 +111756,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
@@ -111908,7 +111834,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v185, off, s32
; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
@@ -111941,7 +111867,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
@@ -111974,7 +111900,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
@@ -112032,22 +111958,6 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; SI-LABEL: bitcast_v16f64_to_v128i8:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
@@ -112180,6 +112090,22 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr36
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr46
; SI-NEXT: ; implicit-def: $vgpr44
; SI-NEXT: ; implicit-def: $vgpr42
@@ -112211,14 +112137,13 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr38
; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB72_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_alignbit_b32 v33, v32, v31, 24
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -112449,7 +112374,6 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB72_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0
; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0
; SI-NEXT: v_alignbit_b32 v33, v32, v31, 24
@@ -113228,22 +113152,6 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; VI-LABEL: bitcast_v16f64_to_v128i8:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
@@ -113346,6 +113254,22 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; VI-NEXT: ; implicit-def: $vgpr39
; VI-NEXT: ; kill: killed $vgpr39
; VI-NEXT: ; implicit-def: $vgpr39
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr57
; VI-NEXT: ; kill: killed $vgpr39
; VI-NEXT: ; implicit-def: $vgpr39
@@ -113448,132 +113372,132 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v28
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v28
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[31:32]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v27
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v27
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v26
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[29:30]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v26
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v26
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[27:28]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v25
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v24
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v24
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[25:26]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v24
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v23
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[23:24]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v22
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v22
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[21:22]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v21
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v21
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v20
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[19:20]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v20
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v20
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[17:18]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v19
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v18
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[15:16]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v18
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v17
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v17
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[13:14]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v16
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v16
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v16
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[11:12]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v15
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v15
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v14
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[9:10]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v14
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v14
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[7:8]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v13
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v12
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[5:6]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v12
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[31:32]
+; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v11
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[29:30]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[27:28]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[25:26]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[23:24]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[21:22]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[19:20]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[17:18]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[15:16]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[13:14]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[11:12]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[9:10]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[7:8]
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[39:40], 24, v[5:6]
-; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
; VI-NEXT: v_lshrrev_b64 v[41:42], 24, v[1:2]
; VI-NEXT: v_lshrrev_b32_e32 v56, 24, v28
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v57, 8, v11
; VI-NEXT: v_lshrrev_b32_e32 v49, 24, v10
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v10
@@ -114184,6 +114108,10 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX9-LABEL: bitcast_v16f64_to_v128i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -114200,9 +114128,6 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
; GFX9-NEXT: ; implicit-def: $vgpr41
; GFX9-NEXT: ; kill: killed $vgpr41
; GFX9-NEXT: ; implicit-def: $vgpr41
@@ -114335,7 +114260,6 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX9-NEXT: ; implicit-def: $vgpr48
; GFX9-NEXT: ; kill: killed $vgpr41
; GFX9-NEXT: ; implicit-def: $vgpr41
-; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
@@ -114395,7 +114319,7 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: s_waitcnt vmcnt(47)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; GFX9-NEXT: ; implicit-def: $vgpr33
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -114408,7 +114332,7 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v32
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(33)
+; GFX9-NEXT: s_waitcnt vmcnt(49)
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v31
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v31
@@ -114416,152 +114340,151 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v30
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v30
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[31:32]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v30
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v29
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v29
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[29:30]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v28
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v28
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v28
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[27:28]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v27
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v27
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v26
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[25:26]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v26
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[23:24]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v25
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v24
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[21:22]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v24
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v23
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[19:20]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v22
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v22
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v22
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[17:18]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v21
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v21
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v20
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[15:16]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v20
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v20
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v19
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[13:14]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v19
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v18
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[11:12]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v18
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v17
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v17
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[9:10]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v16
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v16
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[7:8]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v14
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v14
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[5:6]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v12
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v11
+; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[3:4]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v10
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[31:32]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v10
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[29:30]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[27:28]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[25:26]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[23:24]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[21:22]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[19:20]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[17:18]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[15:16]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[13:14]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[11:12]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[9:10]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[7:8]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[5:6]
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[3:4]
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
@@ -114571,6 +114494,7 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 8, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v61, 8, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 8, v11
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 8, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v63, 8, v9
@@ -114599,7 +114523,7 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB72_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
-; GFX9-NEXT: s_waitcnt vmcnt(30)
+; GFX9-NEXT: s_waitcnt vmcnt(46)
; GFX9-NEXT: v_add_f64 v[31:32], v[31:32], 1.0
; GFX9-NEXT: v_add_f64 v[29:30], v[29:30], 1.0
; GFX9-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
@@ -115628,7 +115552,11 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v16f64_to_v128i8:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x2
+; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:88
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:84
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:80
@@ -115649,10 +115577,6 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:20
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:12
-; GFX11-FAKE16-NEXT: s_clause 0x2
-; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
-; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr75
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr74
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
@@ -116272,7 +116196,7 @@ define <128 x i8> @bitcast_v16f64_to_v128i8(<16 x double> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:12
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:20
@@ -117056,6 +116980,11 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v33, s4
; SI-NEXT: v_readlane_b32 s4, v61, 39
; SI-NEXT: v_mov_b32_e32 v30, s4
+; SI-NEXT: v_mov_b32_e32 v29, s46
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v29, s98
; SI-NEXT: v_readlane_b32 s4, v61, 40
; SI-NEXT: v_mov_b32_e32 v34, s4
; SI-NEXT: v_readlane_b32 s4, v61, 41
@@ -117148,6 +117077,10 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v25, s4
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v29, s96
; SI-NEXT: v_readlane_b32 s4, v62, 0
; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -117204,20 +117137,69 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v25, s4
-; SI-NEXT: v_mov_b32_e32 v29, s46
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v29, s98
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v29, s96
+; SI-NEXT: v_readlane_b32 s4, v62, 14
+; SI-NEXT: v_mov_b32_e32 v60, s4
+; SI-NEXT: v_readlane_b32 s4, v62, 15
+; SI-NEXT: v_mov_b32_e32 v31, s4
+; SI-NEXT: v_readlane_b32 s4, v62, 16
+; SI-NEXT: v_mov_b32_e32 v32, s4
+; SI-NEXT: v_readlane_b32 s4, v62, 17
+; SI-NEXT: v_mov_b32_e32 v18, s5
+; SI-NEXT: v_mov_b32_e32 v46, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 0
+; SI-NEXT: v_readlane_b32 s5, v61, 1
+; SI-NEXT: v_mov_b32_e32 v59, s17
+; SI-NEXT: v_mov_b32_e32 v58, s16
+; SI-NEXT: v_mov_b32_e32 v45, s19
+; SI-NEXT: v_mov_b32_e32 v44, s18
+; SI-NEXT: v_mov_b32_e32 v53, s21
+; SI-NEXT: v_mov_b32_e32 v52, s20
+; SI-NEXT: v_mov_b32_e32 v39, s23
+; SI-NEXT: v_mov_b32_e32 v38, s22
+; SI-NEXT: v_mov_b32_e32 v24, s25
+; SI-NEXT: v_mov_b32_e32 v23, s24
+; SI-NEXT: v_mov_b32_e32 v22, s27
+; SI-NEXT: v_mov_b32_e32 v21, s26
+; SI-NEXT: v_mov_b32_e32 v20, s29
+; SI-NEXT: v_mov_b32_e32 v19, s28
+; SI-NEXT: v_mov_b32_e32 v16, s7
+; SI-NEXT: v_mov_b32_e32 v15, s6
+; SI-NEXT: v_mov_b32_e32 v14, s9
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s86
+; SI-NEXT: v_mov_b32_e32 v13, s8
+; SI-NEXT: v_mov_b32_e32 v12, s11
+; SI-NEXT: v_mov_b32_e32 v11, s10
+; SI-NEXT: v_mov_b32_e32 v10, s13
+; SI-NEXT: v_mov_b32_e32 v9, s12
+; SI-NEXT: v_mov_b32_e32 v8, s15
+; SI-NEXT: v_mov_b32_e32 v7, s14
+; SI-NEXT: v_mov_b32_e32 v6, s41
+; SI-NEXT: v_mov_b32_e32 v5, s40
+; SI-NEXT: v_mov_b32_e32 v4, s43
+; SI-NEXT: v_mov_b32_e32 v3, s42
+; SI-NEXT: v_mov_b32_e32 v2, s45
+; SI-NEXT: v_mov_b32_e32 v1, s44
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: v_mov_b32_e32 v28, s38
+; SI-NEXT: v_mov_b32_e32 v27, s36
+; SI-NEXT: v_mov_b32_e32 v26, s34
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v25, s30
+; SI-NEXT: v_mov_b32_e32 v56, s94
+; SI-NEXT: v_mov_b32_e32 v55, s92
+; SI-NEXT: v_mov_b32_e32 v54, s90
+; SI-NEXT: v_mov_b32_e32 v42, s88
+; SI-NEXT: v_mov_b32_e32 v41, s78
+; SI-NEXT: v_mov_b32_e32 v40, s76
+; SI-NEXT: v_mov_b32_e32 v50, s74
+; SI-NEXT: v_mov_b32_e32 v49, s72
+; SI-NEXT: v_mov_b32_e32 v48, s62
+; SI-NEXT: v_mov_b32_e32 v47, s60
+; SI-NEXT: v_mov_b32_e32 v36, s58
+; SI-NEXT: v_mov_b32_e32 v35, s56
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(1)
@@ -117260,165 +117242,108 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v29, s50
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s4, v62, 14
-; SI-NEXT: v_mov_b32_e32 v60, s4
-; SI-NEXT: v_readlane_b32 s4, v62, 15
-; SI-NEXT: v_mov_b32_e32 v31, s4
-; SI-NEXT: v_readlane_b32 s4, v62, 16
-; SI-NEXT: v_mov_b32_e32 v32, s4
-; SI-NEXT: v_readlane_b32 s4, v62, 17
-; SI-NEXT: v_mov_b32_e32 v18, s5
-; SI-NEXT: v_mov_b32_e32 v46, s4
-; SI-NEXT: v_readlane_b32 s4, v61, 0
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 2
+; SI-NEXT: v_readlane_b32 s5, v61, 3
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 1
-; SI-NEXT: v_readlane_b32 s4, v61, 2
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 4
+; SI-NEXT: v_readlane_b32 s5, v61, 5
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 3
-; SI-NEXT: v_readlane_b32 s4, v61, 4
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 6
+; SI-NEXT: v_readlane_b32 s5, v61, 7
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 5
-; SI-NEXT: v_readlane_b32 s4, v61, 6
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 8
+; SI-NEXT: v_readlane_b32 s5, v61, 9
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 7
-; SI-NEXT: v_readlane_b32 s4, v61, 8
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 10
+; SI-NEXT: v_readlane_b32 s5, v61, 11
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 9
-; SI-NEXT: v_readlane_b32 s4, v61, 10
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 12
+; SI-NEXT: v_readlane_b32 s5, v61, 13
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 11
-; SI-NEXT: v_readlane_b32 s4, v61, 12
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 14
+; SI-NEXT: v_readlane_b32 s5, v61, 15
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 13
-; SI-NEXT: v_readlane_b32 s4, v61, 14
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 16
+; SI-NEXT: v_readlane_b32 s5, v61, 17
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 15
-; SI-NEXT: v_readlane_b32 s4, v61, 16
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 18
+; SI-NEXT: v_readlane_b32 s5, v61, 19
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 17
-; SI-NEXT: v_readlane_b32 s4, v61, 18
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 20
+; SI-NEXT: v_readlane_b32 s5, v61, 21
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 19
-; SI-NEXT: v_readlane_b32 s4, v61, 20
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 22
+; SI-NEXT: v_readlane_b32 s5, v61, 23
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 21
-; SI-NEXT: v_readlane_b32 s4, v61, 22
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 24
+; SI-NEXT: v_readlane_b32 s5, v61, 25
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 23
-; SI-NEXT: v_readlane_b32 s4, v61, 24
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 26
+; SI-NEXT: v_readlane_b32 s5, v61, 27
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 25
-; SI-NEXT: v_readlane_b32 s4, v61, 26
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 28
+; SI-NEXT: v_readlane_b32 s5, v61, 29
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 27
-; SI-NEXT: v_readlane_b32 s4, v61, 28
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 30
+; SI-NEXT: v_readlane_b32 s5, v61, 31
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 29
-; SI-NEXT: v_readlane_b32 s4, v61, 30
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 32
+; SI-NEXT: v_readlane_b32 s5, v61, 33
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s48
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: v_readlane_b32 s5, v61, 31
-; SI-NEXT: v_readlane_b32 s4, v61, 32
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v29, s4
-; SI-NEXT: v_mov_b32_e32 v59, s17
-; SI-NEXT: v_mov_b32_e32 v58, s16
-; SI-NEXT: v_mov_b32_e32 v45, s19
-; SI-NEXT: v_mov_b32_e32 v44, s18
-; SI-NEXT: v_mov_b32_e32 v53, s21
-; SI-NEXT: v_mov_b32_e32 v52, s20
-; SI-NEXT: v_mov_b32_e32 v39, s23
-; SI-NEXT: v_mov_b32_e32 v38, s22
-; SI-NEXT: v_mov_b32_e32 v24, s25
-; SI-NEXT: v_mov_b32_e32 v23, s24
-; SI-NEXT: v_mov_b32_e32 v22, s27
-; SI-NEXT: v_mov_b32_e32 v21, s26
-; SI-NEXT: v_mov_b32_e32 v20, s29
-; SI-NEXT: v_mov_b32_e32 v19, s28
-; SI-NEXT: v_mov_b32_e32 v16, s7
-; SI-NEXT: v_mov_b32_e32 v15, s6
-; SI-NEXT: v_mov_b32_e32 v14, s9
-; SI-NEXT: v_mov_b32_e32 v13, s8
-; SI-NEXT: v_mov_b32_e32 v12, s11
-; SI-NEXT: v_mov_b32_e32 v11, s10
-; SI-NEXT: v_mov_b32_e32 v10, s13
-; SI-NEXT: v_mov_b32_e32 v9, s12
-; SI-NEXT: v_mov_b32_e32 v8, s15
-; SI-NEXT: v_mov_b32_e32 v7, s14
-; SI-NEXT: v_mov_b32_e32 v6, s41
-; SI-NEXT: v_mov_b32_e32 v5, s40
-; SI-NEXT: v_mov_b32_e32 v4, s43
-; SI-NEXT: v_mov_b32_e32 v3, s42
-; SI-NEXT: v_mov_b32_e32 v2, s45
-; SI-NEXT: v_mov_b32_e32 v1, s44
-; SI-NEXT: v_mov_b32_e32 v28, s38
-; SI-NEXT: v_mov_b32_e32 v27, s36
-; SI-NEXT: v_mov_b32_e32 v26, s34
-; SI-NEXT: v_mov_b32_e32 v25, s30
-; SI-NEXT: v_mov_b32_e32 v56, s94
-; SI-NEXT: v_mov_b32_e32 v55, s92
-; SI-NEXT: v_mov_b32_e32 v54, s90
-; SI-NEXT: v_mov_b32_e32 v42, s88
-; SI-NEXT: v_mov_b32_e32 v41, s78
-; SI-NEXT: v_mov_b32_e32 v40, s76
-; SI-NEXT: v_mov_b32_e32 v50, s74
-; SI-NEXT: v_mov_b32_e32 v49, s72
-; SI-NEXT: v_mov_b32_e32 v48, s62
-; SI-NEXT: v_mov_b32_e32 v47, s60
-; SI-NEXT: v_mov_b32_e32 v36, s58
-; SI-NEXT: v_mov_b32_e32 v35, s56
-; SI-NEXT: v_readlane_b32 s5, v61, 33
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: .LBB73_5: ; %end
@@ -118690,6 +118615,10 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v35, s4
; VI-NEXT: v_readlane_b32 s4, v62, 11
; VI-NEXT: v_mov_b32_e32 v41, s4
+; VI-NEXT: v_mov_b32_e32 v40, s48
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s38
; VI-NEXT: v_readlane_b32 s4, v62, 12
; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v35, s4
@@ -118727,6 +118656,9 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v35, s4
; VI-NEXT: v_readlane_b32 s4, v62, 25
; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s36
; VI-NEXT: v_mov_b32_e32 v35, s4
; VI-NEXT: v_readlane_b32 s4, v62, 26
; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
@@ -118764,6 +118696,9 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_readlane_b32 s4, v62, 37
; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v35, s4
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s34
; VI-NEXT: v_readlane_b32 s4, v62, 38
; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v35, s4
@@ -118779,52 +118714,6 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_readlane_b32 s4, v62, 42
; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v35, s4
-; VI-NEXT: v_mov_b32_e32 v40, s48
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s38
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s36
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s34
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s30
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s90
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s88
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s78
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s76
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s74
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s72
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s62
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s60
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s58
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v40, s56
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: v_readlane_b32 s4, v62, 43
; VI-NEXT: v_mov_b32_e32 v53, s4
; VI-NEXT: v_readlane_b32 s4, v62, 44
@@ -118834,6 +118723,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_readlane_b32 s4, v62, 46
; VI-NEXT: v_mov_b32_e32 v58, s4
; VI-NEXT: v_readlane_b32 s4, v62, 47
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v35, s4
; VI-NEXT: v_readlane_b32 s4, v62, 48
; VI-NEXT: v_mov_b32_e32 v54, s4
@@ -118846,17 +118736,17 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_readlane_b32 s4, v62, 52
; VI-NEXT: v_mov_b32_e32 v39, s4
; VI-NEXT: v_readlane_b32 s4, v62, 53
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s30
; VI-NEXT: v_mov_b32_e32 v49, s4
; VI-NEXT: v_readlane_b32 s4, v62, 54
; VI-NEXT: v_mov_b32_e32 v61, s4
; VI-NEXT: v_readlane_b32 s4, v62, 55
; VI-NEXT: v_mov_b32_e32 v36, s4
; VI-NEXT: v_readlane_b32 s4, v62, 56
-; VI-NEXT: v_mov_b32_e32 v40, s46
; VI-NEXT: v_mov_b32_e32 v55, s4
; VI-NEXT: v_readlane_b32 s4, v62, 57
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v12, s5
; VI-NEXT: v_mov_b32_e32 v1, s44
; VI-NEXT: v_mov_b32_e32 v2, s45
@@ -118886,13 +118776,48 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v28, s21
; VI-NEXT: v_mov_b32_e32 v29, s18
; VI-NEXT: v_mov_b32_e32 v30, s19
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s90
; VI-NEXT: v_mov_b32_e32 v31, s16
; VI-NEXT: v_mov_b32_e32 v32, s17
; VI-NEXT: v_mov_b32_e32 v42, s70
; VI-NEXT: v_mov_b32_e32 v50, s4
-; VI-NEXT: v_mov_b32_e32 v40, v43
; VI-NEXT: v_mov_b32_e32 v46, v38
; VI-NEXT: v_mov_b32_e32 v38, v34
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s88
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s78
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s76
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s74
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s72
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s62
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s60
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s58
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s56
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, s46
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v40, v43
; VI-NEXT: .LBB73_5: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v34, 8, v42
; VI-NEXT: v_or_b32_sdwa v31, v31, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -119906,6 +119831,12 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: s_branch .LBB73_2
; GFX9-NEXT: .LBB73_4:
+; GFX9-NEXT: v_mov_b32_e32 v41, s66
+; GFX9-NEXT: v_mov_b32_e32 v40, s36
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s34
; GFX9-NEXT: v_mov_b32_e32 v15, s81
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v15, s71
@@ -119982,6 +119913,10 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v15, s4
; GFX9-NEXT: v_readlane_b32 s4, v62, 9
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s30
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v15, s4
; GFX9-NEXT: v_readlane_b32 s4, v62, 10
@@ -120040,71 +119975,10 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_readlane_b32 s4, v62, 28
; GFX9-NEXT: v_mov_b32_e32 v29, s4
; GFX9-NEXT: v_readlane_b32 s4, v62, 29
-; GFX9-NEXT: v_mov_b32_e32 v41, s66
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v15, s4
-; GFX9-NEXT: v_mov_b32_e32 v40, s36
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s34
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s30
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s94
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s92
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s90
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s88
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s78
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s76
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s74
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s72
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s62
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s60
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s58
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v40, s56
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: v_readlane_b32 s4, v62, 30
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v15, s4
; GFX9-NEXT: v_readlane_b32 s4, v62, 31
; GFX9-NEXT: v_mov_b32_e32 v44, s4
@@ -120119,6 +119993,10 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_readlane_b32 s4, v62, 36
; GFX9-NEXT: v_mov_b32_e32 v55, s4
; GFX9-NEXT: v_readlane_b32 s4, v62, 37
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s94
; GFX9-NEXT: v_mov_b32_e32 v61, s4
; GFX9-NEXT: v_readlane_b32 s4, v62, 38
; GFX9-NEXT: v_mov_b32_e32 v42, s4
@@ -120143,7 +120021,6 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_readlane_b32 s4, v62, 48
; GFX9-NEXT: v_mov_b32_e32 v60, s4
; GFX9-NEXT: v_readlane_b32 s4, v62, 49
-; GFX9-NEXT: v_mov_b32_e32 v40, s46
; GFX9-NEXT: v_mov_b32_e32 v12, s5
; GFX9-NEXT: v_mov_b32_e32 v1, s44
; GFX9-NEXT: v_mov_b32_e32 v2, s45
@@ -120181,6 +120058,54 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v54, s64
; GFX9-NEXT: v_mov_b32_e32 v52, s54
; GFX9-NEXT: v_mov_b32_e32 v25, s4
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s92
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s90
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s88
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s78
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s76
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s74
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s72
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s62
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s60
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s58
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s56
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v40, s46
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
@@ -120202,6 +120127,8 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v25, v51, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v45
; GFX9-NEXT: v_or_b32_sdwa v48, v48, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshlrev_b32_e32 v40, 8, v56
; GFX9-NEXT: v_or_b32_sdwa v50, v50, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v22
@@ -120252,22 +120179,20 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_readlane_b32 s31, v63, 1
; GFX9-NEXT: v_readlane_b32 s30, v63, 0
; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v36
; GFX9-NEXT: v_or_b32_sdwa v27, v27, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v44
; GFX9-NEXT: v_or_b32_sdwa v28, v28, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v29
; GFX9-NEXT: v_or_b32_sdwa v29, v19, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v30
; GFX9-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v20, v20, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v51
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v36
; GFX9-NEXT: v_or_b32_sdwa v23, v23, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -120599,7 +120524,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_or_saveexec_b32 s4, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:80
; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:84
; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:88
@@ -120634,7 +120559,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_writelane_b32 v77, s101, 5
; GFX11-NEXT: s_mov_b32 vcc_hi, 0
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
-; GFX11-NEXT: s_clause 0x13
+; GFX11-NEXT: s_clause 0x13 ; 80-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:76
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:72
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:68
@@ -121542,7 +121467,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: scratch_store_b128 v0, v[11:14], off offset:80
; GFX11-NEXT: scratch_store_b128 v0, v[7:10], off offset:96
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
-; GFX11-NEXT: s_clause 0x13
+; GFX11-NEXT: s_clause 0x13 ; 80-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v75, off, s32
; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:8
@@ -121605,7 +121530,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_readlane_b32 s31, v76, 1
; GFX11-NEXT: v_readlane_b32 s30, v76, 0
; GFX11-NEXT: s_or_saveexec_b32 s0, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:80
; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:84
; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:88
@@ -121818,13 +121743,26 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:208
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:188
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(4) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196
@@ -121985,44 +121923,30 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:356
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:384
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380
; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v3
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:364
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:372
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:384
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB74_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
@@ -122031,11 +121955,11 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v9, 0xff, v49
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
@@ -122638,7 +122562,6 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB74_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
@@ -122652,8 +122575,8 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v9, 0xff, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
@@ -123333,13 +123256,25 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -123476,34 +123411,20 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v3
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v0
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -123989,7 +123910,6 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_add_u16_e32 v9, 3, v61
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
@@ -124567,13 +124487,27 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(16)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -124715,34 +124649,20 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v3
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v0
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -125229,7 +125149,6 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_add_u16_e32 v9, 3, v61
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
@@ -126368,7 +126287,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v16f64:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:592
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:588
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:584
@@ -126401,7 +126320,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:476
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:472
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:468
-; GFX11-FAKE16-NEXT: s_clause 0x12
+; GFX11-FAKE16-NEXT: s_clause 0x12 ; 76-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:464
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:460
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:456
@@ -127342,7 +127261,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: v_or_b32_e32 v31, v35, v36
; GFX11-FAKE16-NEXT: .LBB74_4: ; %end
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v138, off, s32 offset:392
; GFX11-FAKE16-NEXT: scratch_load_b32 v137, off, s32 offset:396
; GFX11-FAKE16-NEXT: scratch_load_b32 v136, off, s32 offset:400
@@ -127375,7 +127294,7 @@ define <16 x double> @bitcast_v128i8_to_v16f64(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:508
; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:512
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:516
-; GFX11-FAKE16-NEXT: s_clause 0x12
+; GFX11-FAKE16-NEXT: s_clause 0x12 ; 76-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:520
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:524
; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:528
@@ -128092,24 +128011,13 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB75_3
; SI-NEXT: .LBB75_2:
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v55, v56
; SI-NEXT: v_mov_b32_e32 v42, v46
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
@@ -128120,10 +128028,22 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: .LBB75_3: ; %Flow
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_mov_b32_e32 v35, v57
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
@@ -128133,7 +128053,6 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
@@ -128728,13 +128647,13 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v5
; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v7
; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v9
; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v11
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v13
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v17
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
@@ -128962,11 +128881,11 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_or_b32_sdwa v0, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v4, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v2, v6, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -128976,11 +128895,8 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
@@ -128988,6 +128904,8 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -129196,12 +129114,6 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_branch .LBB75_3
; VI-NEXT: .LBB75_2:
-; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v44, v56
; VI-NEXT: v_mov_b32_e32 v41, v33
; VI-NEXT: v_mov_b32_e32 v50, v40
@@ -129219,6 +129131,12 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v54, v53
; VI-NEXT: v_mov_b32_e32 v52, v36
; VI-NEXT: v_mov_b32_e32 v49, v51
@@ -129228,7 +129146,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: v_mov_b32_e32 v51, v41
; VI-NEXT: v_mov_b32_e32 v36, v44
; VI-NEXT: v_mov_b32_e32 v53, v54
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_mov_b32_e32 v54, v60
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
@@ -129241,7 +129159,6 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: ; %bb.4: ; %cmp.true
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
@@ -129826,8 +129743,8 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v11
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v9
; GFX9-NEXT: s_waitcnt vmcnt(5)
@@ -130006,16 +129923,18 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
@@ -130042,9 +129961,8 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -130060,14 +129978,16 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
@@ -130079,10 +129999,11 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_mov_b32_e32 v61, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -130095,10 +130016,12 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
@@ -130112,17 +130035,22 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v53, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v0, v32, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v1, v51, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v58, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -130138,45 +130066,24 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v35, v62
; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(11)
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(10)
; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(9)
; GFX9-NEXT: v_or_b32_sdwa v0, v54, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(7)
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v1, v32, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_or_b32_sdwa v0, v44, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v0, v48, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v49, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v39, v31 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v40, v30
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -130227,18 +130134,6 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB75_3
; GFX9-NEXT: .LBB75_2:
-; GFX9-NEXT: v_mov_b32_e32 v38, v51
-; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v33, v43
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
@@ -130252,6 +130147,18 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v38, v51
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v35, v62
; GFX9-NEXT: v_mov_b32_e32 v36, v31
; GFX9-NEXT: v_mov_b32_e32 v40, v30
@@ -130689,7 +130596,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-LABEL: bitcast_v128i8_to_v16f64_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:476
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:472
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:468
@@ -130722,7 +130629,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:360
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:356
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:352
-; GFX11-TRUE16-NEXT: s_clause 0x7
+; GFX11-TRUE16-NEXT: s_clause 0x7 ; 32-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:348
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:344
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:340
@@ -131579,7 +131486,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-TRUE16-NEXT: .LBB75_3: ; %end
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:320
; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:324
; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:328
@@ -131612,7 +131519,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:436
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:440
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:444
-; GFX11-TRUE16-NEXT: s_clause 0x7
+; GFX11-TRUE16-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:448
; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:452
; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:456
@@ -131630,7 +131537,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v16f64_scalar:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:476
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:472
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:468
@@ -131663,7 +131570,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:360
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:356
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:352
-; GFX11-FAKE16-NEXT: s_clause 0x7
+; GFX11-FAKE16-NEXT: s_clause 0x7 ; 32-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:348
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:344
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:340
@@ -132520,7 +132427,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-FAKE16-NEXT: .LBB75_3: ; %end
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:320
; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:324
; GFX11-FAKE16-NEXT: scratch_load_b32 v109, off, s32 offset:328
@@ -132553,7 +132460,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:436
; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:440
; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:444
-; GFX11-FAKE16-NEXT: s_clause 0x7
+; GFX11-FAKE16-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:448
; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:452
; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:456
@@ -132588,22 +132495,6 @@ define <64 x bfloat> @bitcast_v16f64_to_v64bf16(<16 x double> %a, i32 %b) {
; SI-LABEL: bitcast_v16f64_to_v64bf16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
@@ -132672,6 +132563,22 @@ define <64 x bfloat> @bitcast_v16f64_to_v64bf16(<16 x double> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr35
; SI-NEXT: ; kill: killed $vgpr35
; SI-NEXT: ; implicit-def: $vgpr35
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr34
; SI-NEXT: ; implicit-def: $vgpr62
; SI-NEXT: ; implicit-def: $vgpr63
@@ -132703,7 +132610,7 @@ define <64 x bfloat> @bitcast_v16f64_to_v64bf16(<16 x double> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr37
; SI-NEXT: ; kill: killed $vgpr35
; SI-NEXT: ; implicit-def: $vgpr35
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -132713,7 +132620,7 @@ define <64 x bfloat> @bitcast_v16f64_to_v64bf16(<16 x double> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v35, 0xffff0000, v32
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v31
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
@@ -132843,7 +132750,6 @@ define <64 x bfloat> @bitcast_v16f64_to_v64bf16(<16 x double> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB76_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0
; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0
; SI-NEXT: v_and_b32_e32 v35, 0xffff0000, v32
@@ -136071,7 +135977,10 @@ define <16 x double> @bitcast_v64bf16_to_v16f64(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v16f64:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0x1
+; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:60
@@ -136088,9 +135997,6 @@ define <16 x double> @bitcast_v64bf16_to_v16f64(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:16
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:8
-; GFX11-TRUE16-NEXT: s_clause 0x1
-; GFX11-TRUE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v32
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
@@ -136656,7 +136562,7 @@ define <16 x double> @bitcast_v64bf16_to_v16f64(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, v60 :: v_dual_mov_b32 v29, v61
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, v62 :: v_dual_mov_b32 v31, v63
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:8
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:16
@@ -139123,7 +139029,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:280
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:276
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:272
@@ -139156,7 +139062,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:160
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:156
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:152
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:148
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:144
@@ -139189,7 +139095,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:32
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:28
-; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: s_clause 0x6 ; 28-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:24
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:20
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:16
@@ -139991,7 +139897,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v172 :: v_dual_mov_b32 v21, v169
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v20, v170 :: v_dual_mov_b32 v23, v183
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v22, v168 :: v_dual_mov_b32 v25, v181
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v174, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v173, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v172, off, s32 offset:8
@@ -140024,7 +139930,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:136
@@ -140057,7 +139963,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0x6
+; GFX11-TRUE16-NEXT: s_clause 0x6 ; 28-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:264
@@ -140097,7 +140003,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:288
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:284
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:280
@@ -140130,7 +140036,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:172
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:168
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:164
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:160
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:156
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:152
@@ -140163,7 +140069,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v157, s32 offset:44
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v158, s32 offset:40
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v159, s32 offset:36
-; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: s_clause 0x8 ; 36-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v168, s32 offset:32
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v169, s32 offset:28
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v170, s32 offset:24
@@ -140855,7 +140761,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v20, v184 :: v_dual_mov_b32 v23, v174
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v22, v171 :: v_dual_mov_b32 v25, v169
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v26, v170 :: v_dual_mov_b32 v29, v180
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v184, off, s32
; GFX11-FAKE16-NEXT: scratch_load_b32 v175, off, s32 offset:4
; GFX11-FAKE16-NEXT: scratch_load_b32 v174, off, s32 offset:8
@@ -140888,7 +140794,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: scratch_load_b32 v123, off, s32 offset:116
; GFX11-FAKE16-NEXT: scratch_load_b32 v122, off, s32 offset:120
; GFX11-FAKE16-NEXT: scratch_load_b32 v121, off, s32 offset:124
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v120, off, s32 offset:128
; GFX11-FAKE16-NEXT: scratch_load_b32 v111, off, s32 offset:132
; GFX11-FAKE16-NEXT: scratch_load_b32 v110, off, s32 offset:136
@@ -140921,7 +140827,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:244
; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:248
; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:252
-; GFX11-FAKE16-NEXT: s_clause 0x8
+; GFX11-FAKE16-NEXT: s_clause 0x8 ; 36-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:256
; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:260
; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:264
@@ -140978,22 +140884,6 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) {
; SI-LABEL: bitcast_v16f64_to_v64f16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
@@ -141062,6 +140952,22 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr35
; SI-NEXT: ; kill: killed $vgpr35
; SI-NEXT: ; implicit-def: $vgpr35
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $vgpr34
; SI-NEXT: ; implicit-def: $vgpr47
@@ -141093,7 +140999,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; kill: killed $vgpr35
; SI-NEXT: ; implicit-def: $vgpr35
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -141144,7 +141050,6 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v8, v52
; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v32
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v31
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v28
; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22
@@ -141314,7 +141219,6 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) {
; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0
; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0
; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8
; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v9
@@ -144567,7 +144471,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
@@ -144600,7 +144504,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
@@ -144633,7 +144537,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
@@ -144711,7 +144615,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v185, off, s32
; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
@@ -144744,7 +144648,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
@@ -144777,7 +144681,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
@@ -144835,6 +144739,10 @@ define <64 x i16> @bitcast_v16f64_to_v64i16(<16 x double> %a, i32 %b) {
; SI-LABEL: bitcast_v16f64_to_v64i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -144851,10 +144759,6 @@ define <64 x i16> @bitcast_v16f64_to_v64i16(<16 x double> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr60
; SI-NEXT: ; implicit-def: $vgpr58
; SI-NEXT: ; implicit-def: $vgpr63
@@ -144886,14 +144790,13 @@ define <64 x i16> @bitcast_v16f64_to_v64i16(<16 x double> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr50
; SI-NEXT: ; kill: killed $vgpr48
; SI-NEXT: ; implicit-def: $vgpr48
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB84_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_alignbit_b32 v33, v32, v31, 16
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: v_alignbit_b32 v34, v30, v29, 16
@@ -144937,7 +144840,6 @@ define <64 x i16> @bitcast_v16f64_to_v64i16(<16 x double> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB84_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0
; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0
; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0
@@ -145175,7 +145077,7 @@ define <64 x i16> @bitcast_v16f64_to_v64i16(<16 x double> %a, i32 %b) {
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v31
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
@@ -146031,7 +145933,13 @@ define <16 x double> @bitcast_v64i16_to_v16f64(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v33
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
@@ -146063,12 +145971,6 @@ define <16 x double> @bitcast_v64i16_to_v16f64(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:100
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -146084,7 +145986,6 @@ define <16 x double> @bitcast_v64i16_to_v16f64(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v41
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
@@ -146301,7 +146202,6 @@ define <16 x double> @bitcast_v64i16_to_v16f64(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v41
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
@@ -147475,7 +147375,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
@@ -147508,7 +147408,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
@@ -147541,7 +147441,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
@@ -147619,7 +147519,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v185, off, s32
; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
@@ -147652,7 +147552,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
@@ -147685,7 +147585,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
+; GFX11-NEXT: s_clause 0x9 ; 40-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
@@ -147895,6 +147795,8 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v25
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:268
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:264
; SI-NEXT: ; implicit-def: $vgpr11
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr9
@@ -147904,7 +147806,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:140
@@ -147944,38 +147846,39 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v29
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:88
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:212
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:192
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:88
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v2
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:208
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:204
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:200
@@ -147991,11 +147894,12 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v2
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:240
-; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -148017,14 +147921,6 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v2
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v3
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:272
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:268
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:264
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:292
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316
@@ -148032,11 +147928,15 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:304
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:300
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:296
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:272
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v1
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v41, 24, v2
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v44, 8, v3
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:340
@@ -148045,9 +147945,11 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:336
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:332
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:328
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v45, 24, v1
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v2
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:356
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:372
@@ -148057,7 +147959,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:368
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:364
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:360
-; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v3
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v62, 24, v1
@@ -149940,8 +149842,8 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v25
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v29
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v55, 8, v3
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v40, 8, v5
; VI-NEXT: v_lshlrev_b16_e32 v41, 8, v7
; VI-NEXT: v_lshlrev_b16_e32 v50, 8, v9
@@ -150037,13 +149939,25 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:52
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -150171,14 +150085,19 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v38, 8, v0
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_lshlrev_b16_e32 v39, 8, v1
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v49, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v51, 8, v3
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:356
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_lshlrev_b16_e32 v51, 8, v3
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b16_e32 v53, 8, v0
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -150186,26 +150105,6 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v53, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:52
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -150214,35 +150113,57 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_or_b32_sdwa v0, v0, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: s_waitcnt vmcnt(12)
; VI-NEXT: v_or_b32_sdwa v1, v1, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(11)
; VI-NEXT: v_or_b32_sdwa v3, v3, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v2, v2, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_or_b32_sdwa v2, v2, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_or_b32_sdwa v4, v4, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_or_b32_sdwa v10, v61, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v5, v5, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v11, v57, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_or_b32_sdwa v6, v6, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v12, v59, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v13, v47, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v14, v45, v14 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v15, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: ; implicit-def: $vgpr61
+; VI-NEXT: ; implicit-def: $vgpr57
+; VI-NEXT: ; implicit-def: $vgpr59
+; VI-NEXT: ; implicit-def: $vgpr47
+; VI-NEXT: ; implicit-def: $vgpr45
+; VI-NEXT: ; implicit-def: $vgpr43
; VI-NEXT: ; implicit-def: $vgpr54
; VI-NEXT: ; implicit-def: $vgpr55
; VI-NEXT: ; implicit-def: $vgpr40
; VI-NEXT: ; implicit-def: $vgpr41
; VI-NEXT: ; implicit-def: $vgpr48
; VI-NEXT: ; implicit-def: $vgpr36
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v6, v6, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: ; implicit-def: $vgpr34
+; VI-NEXT: v_or_b32_sdwa v31, v31, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: ; implicit-def: $vgpr49
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v2, v2, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -150275,39 +150196,19 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v6, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: ; implicit-def: $vgpr53
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_or_b32_sdwa v10, v61, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_or_b32_sdwa v11, v57, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_or_b32_sdwa v12, v59, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_or_b32_sdwa v13, v47, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v14, v45, v14 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr61
-; VI-NEXT: ; implicit-def: $vgpr57
-; VI-NEXT: ; implicit-def: $vgpr59
-; VI-NEXT: ; implicit-def: $vgpr47
-; VI-NEXT: ; implicit-def: $vgpr45
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v15, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr43
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v8, v62, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -150473,17 +150374,9 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v28, v28, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v29, v29, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v31, v31, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr49
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr53
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v30, v30, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v29, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -151168,8 +151061,8 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v27
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v29
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v54, 8, v3
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v41, 8, v5
; GFX9-NEXT: v_lshlrev_b16_e32 v40, 8, v7
; GFX9-NEXT: v_lshlrev_b16_e32 v51, 8, v9
@@ -151280,13 +151173,27 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(16)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -151419,14 +151326,19 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v37, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_lshlrev_b16_e32 v49, 8, v1
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_lshlrev_b16_e32 v48, 8, v2
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v52, 8, v3
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:356
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v52, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b16_e32 v53, 8, v0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -151434,26 +151346,6 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v53, 8, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -151462,36 +151354,62 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; GFX9-NEXT: s_mov_b32 s6, 0x5040100
+; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v55 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
+; GFX9-NEXT: s_waitcnt vmcnt(14)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_or_b32_sdwa v2, v2, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v3, v3, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v0, v1, v0, s6
+; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: v_or_b32_sdwa v3, v3, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v1, v3, v2, s6
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(10)
+; GFX9-NEXT: v_or_b32_sdwa v10, v60, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v11, v56, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v4, v4, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_or_b32_sdwa v12, v58, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v5, v5, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v13, v46, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v6, v6, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v14, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v31, v31, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: ; implicit-def: $vgpr60
+; GFX9-NEXT: ; implicit-def: $vgpr56
+; GFX9-NEXT: ; implicit-def: $vgpr58
+; GFX9-NEXT: ; implicit-def: $vgpr46
+; GFX9-NEXT: ; implicit-def: $vgpr44
; GFX9-NEXT: ; implicit-def: $vgpr55
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: ; implicit-def: $vgpr41
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr38
; GFX9-NEXT: ; implicit-def: $vgpr35
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v6, v6, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: ; implicit-def: $vgpr33
+; GFX9-NEXT: ; implicit-def: $vgpr48
+; GFX9-NEXT: v_or_b32_sdwa v15, v42, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: ; implicit-def: $vgpr42
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v2, v2, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -151514,49 +151432,25 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_sdwa v5, v5, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v5, v6, v5, s6
; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
; GFX9-NEXT: ; implicit-def: $vgpr34
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v6, v6, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v6, v6, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v6, v7, v6, s6
; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: ; implicit-def: $vgpr53
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v7, v8, v7, s6
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_or_b32_sdwa v10, v60, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_or_b32_sdwa v11, v56, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_or_b32_sdwa v12, v58, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v13, v46, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v14, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: ; implicit-def: $vgpr60
-; GFX9-NEXT: ; implicit-def: $vgpr56
-; GFX9-NEXT: ; implicit-def: $vgpr58
-; GFX9-NEXT: ; implicit-def: $vgpr46
-; GFX9-NEXT: ; implicit-def: $vgpr44
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v15, v42, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: ; implicit-def: $vgpr42
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v8, v63, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -151722,17 +151616,9 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v28, v29, v28, s6
; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v29, v29, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v31, v31, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: ; implicit-def: $vgpr48
-; GFX9-NEXT: ; implicit-def: $vgpr53
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v30, v30, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v29, v30, v29, s6
@@ -153078,7 +152964,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64bf16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:580
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:576
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:572
@@ -153111,7 +152997,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:464
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:460
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:456
-; GFX11-FAKE16-NEXT: s_clause 0xf
+; GFX11-FAKE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:452
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:448
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:444
@@ -153940,7 +153826,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: v_perm_b32 v31, v116, v31, 0x5040100
; GFX11-FAKE16-NEXT: .LBB88_4: ; %end
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:392
; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:396
; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:400
@@ -153973,7 +153859,7 @@ define <64 x bfloat> @bitcast_v128i8_to_v64bf16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:508
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:512
; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:516
-; GFX11-FAKE16-NEXT: s_clause 0xf
+; GFX11-FAKE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:520
; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:524
; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:528
@@ -154018,7 +153904,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[4:5]
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:332
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:328
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324
@@ -154029,7 +153914,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:304
; SI-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
; SI-NEXT: s_mov_b32 s72, s21
-; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v43, s19, 0
; SI-NEXT: v_writelane_b32 v43, s18, 1
; SI-NEXT: v_writelane_b32 v43, s17, 2
@@ -154070,10 +153955,16 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_writelane_b32 v41, s86, 30
; SI-NEXT: v_writelane_b32 v41, s87, 31
; SI-NEXT: v_writelane_b32 v41, s96, 32
+; SI-NEXT: s_mov_b32 s79, s26
; SI-NEXT: v_writelane_b32 v41, s97, 33
; SI-NEXT: v_writelane_b32 v41, s98, 34
; SI-NEXT: v_writelane_b32 v41, s99, 35
-; SI-NEXT: s_mov_b32 s79, s26
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:164
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:160
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:156
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:152
; SI-NEXT: v_readfirstlane_b32 s38, v20
; SI-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
; SI-NEXT: v_readfirstlane_b32 s39, v19
@@ -154100,9 +153991,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_readfirstlane_b32 s18, v5
; SI-NEXT: v_readfirstlane_b32 s19, v6
; SI-NEXT: v_readfirstlane_b32 s88, v4
-; SI-NEXT: v_readfirstlane_b32 s89, v3
-; SI-NEXT: v_readfirstlane_b32 s90, v9
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s6, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:300
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:296
@@ -154110,33 +153999,31 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:288
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:284
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:280
-; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_readfirstlane_b32 s4, v32
; SI-NEXT: v_writelane_b32 v43, s4, 4
-; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_readfirstlane_b32 s4, v33
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:276
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:272
; SI-NEXT: v_writelane_b32 v43, s4, 5
-; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s4, v34
; SI-NEXT: v_writelane_b32 v43, s4, 6
-; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_readfirstlane_b32 s4, v35
; SI-NEXT: v_writelane_b32 v43, s4, 7
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_readfirstlane_b32 s4, v36
; SI-NEXT: v_writelane_b32 v43, s4, 8
-; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_readfirstlane_b32 s4, v37
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:268
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:264
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:260
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:256
; SI-NEXT: v_writelane_b32 v43, s4, 9
-; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_readfirstlane_b32 s4, v38
; SI-NEXT: v_writelane_b32 v43, s4, 10
+; SI-NEXT: v_readfirstlane_b32 s89, v3
+; SI-NEXT: v_readfirstlane_b32 s90, v9
; SI-NEXT: v_readfirstlane_b32 s91, v10
; SI-NEXT: v_readfirstlane_b32 s92, v8
; SI-NEXT: v_readfirstlane_b32 s93, v7
@@ -154219,44 +154106,41 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_readfirstlane_b32 s24, v33
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:168
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:164
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:160
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:156
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:152
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s78, v34
+; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_readfirstlane_b32 s4, v35
; SI-NEXT: v_writelane_b32 v43, s4, 18
+; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_readfirstlane_b32 s4, v36
; SI-NEXT: v_writelane_b32 v43, s4, 19
-; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_readfirstlane_b32 s4, v37
; SI-NEXT: v_writelane_b32 v43, s4, 20
-; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: v_writelane_b32 v43, s4, 21
-; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_readfirstlane_b32 s4, v38
; SI-NEXT: v_writelane_b32 v43, s4, 22
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_readfirstlane_b32 s4, v39
; SI-NEXT: v_writelane_b32 v43, s4, 23
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_readfirstlane_b32 s4, v48
; SI-NEXT: v_writelane_b32 v43, s4, 24
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_readfirstlane_b32 s4, v49
; SI-NEXT: v_writelane_b32 v43, s4, 25
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_readfirstlane_b32 s4, v50
; SI-NEXT: v_writelane_b32 v43, s4, 26
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_readfirstlane_b32 s4, v51
; SI-NEXT: v_writelane_b32 v43, s4, 27
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:148
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:144
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_readfirstlane_b32 s4, v33
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:136
@@ -154270,7 +154154,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:104
; SI-NEXT: v_writelane_b32 v43, s4, 28
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_readfirstlane_b32 s4, v52
; SI-NEXT: v_writelane_b32 v43, s4, 29
; SI-NEXT: v_readfirstlane_b32 s4, v53
@@ -154279,7 +154163,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_writelane_b32 v43, s4, 31
; SI-NEXT: v_readfirstlane_b32 s4, v55
; SI-NEXT: v_writelane_b32 v43, s4, 32
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_readfirstlane_b32 s4, v40
; SI-NEXT: v_writelane_b32 v43, s4, 33
; SI-NEXT: v_writelane_b32 v43, s22, 34
@@ -155894,33 +155777,53 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v24
; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v26
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:180
+; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:188
+; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:292
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:324
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v16
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v18
; VI-NEXT: v_lshlrev_b32_e32 v20, 8, v20
; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v22
-; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v36, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
@@ -155965,52 +155868,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:68
; VI-NEXT: s_waitcnt vmcnt(10)
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:76
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:84
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:92
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:100
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:108
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:116
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:172
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260
-; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:276
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:308
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:324
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
@@ -156030,6 +155887,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
@@ -156038,7 +155896,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
@@ -156070,6 +155927,25 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:76
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:84
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:92
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:100
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:108
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:116
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB89_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
@@ -156094,15 +155970,18 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_or_b32_sdwa v2, v2, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v2, v8
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
@@ -156152,10 +156031,11 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -156163,50 +156043,37 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_or_b32_sdwa v1, v48, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v49, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v1, v60, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v2, v35, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(9)
-; VI-NEXT: v_or_b32_sdwa v3, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_or_b32_sdwa v0, v42, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v1, v41, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v39, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v42, v43
; VI-NEXT: v_mov_b32_e32 v43, v37
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
@@ -156221,13 +156088,12 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v1, v24, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v34, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
@@ -156249,21 +156115,28 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v31, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v54, v33
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v56, v1
; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v2, v35, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v3, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v63, v39
+; VI-NEXT: v_mov_b32_e32 v54, v33
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_mov_b32_e32 v57, v0
; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -156281,11 +156154,10 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v53, v35
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -156318,7 +156190,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: s_branch .LBB89_3
; VI-NEXT: .LBB89_2:
; VI-NEXT: v_mov_b32_e32 v47, v54
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
@@ -156339,6 +156210,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v58, v7
; VI-NEXT: v_mov_b32_e32 v57, v5
; VI-NEXT: v_mov_b32_e32 v56, v3
@@ -156930,29 +156802,51 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
+; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:292
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:324
; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v46
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: s_waitcnt vmcnt(29)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
@@ -157016,82 +156910,42 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:124
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:132
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:140
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:148
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:164
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(22)
; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(24)
; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(31)
; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(35)
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(35)
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
@@ -157112,6 +156966,13 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(55)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:140
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:148
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
@@ -157365,14 +157226,13 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: v_lshl_or_b32 v30, v1, 16, v0
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v31, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
@@ -157382,7 +157242,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: .LBB89_2:
; GFX9-NEXT: v_mov_b32_e32 v58, v50
; GFX9-NEXT: v_mov_b32_e32 v45, v59
-; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
@@ -157394,6 +157253,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v34, v35
+; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v49, v39
@@ -157859,7 +157719,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-LABEL: bitcast_v128i8_to_v64bf16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0x1e
+; GFX11-TRUE16-NEXT: s_clause 0x1e ; 124-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:440
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:436
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:432
@@ -158589,7 +158449,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.h, v182.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v43.l
; GFX11-TRUE16-NEXT: .LBB89_3: ; %end
-; GFX11-TRUE16-NEXT: s_clause 0x1e
+; GFX11-TRUE16-NEXT: s_clause 0x1e ; 124-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:324
; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:328
@@ -158631,7 +158491,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64bf16_scalar:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1e
+; GFX11-FAKE16-NEXT: s_clause 0x1e ; 124-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:440
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:436
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:432
@@ -159415,7 +159275,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v36
; GFX11-FAKE16-NEXT: .LBB89_3: ; %end
-; GFX11-FAKE16-NEXT: s_clause 0x1e
+; GFX11-FAKE16-NEXT: s_clause 0x1e ; 124-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:324
; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:328
@@ -161506,6 +161366,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; VI-LABEL: bitcast_v64bf16_to_v128i8:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
@@ -161522,9 +161385,6 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
-; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; VI-NEXT: ; implicit-def: $vgpr35
; VI-NEXT: ; implicit-def: $vgpr45
; VI-NEXT: ; implicit-def: $vgpr34
@@ -161713,166 +161573,165 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v29
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 24, v28
+; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[15:16]
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v28
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v28
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v27
+; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[13:14]
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v27
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 24, v26
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v26
+; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[11:12]
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v26
-; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v16
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v25
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v16
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v25
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v16
+; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[9:10]
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 24, v24
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v15
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v24
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v15
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v24
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v14
+; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[7:8]
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v23
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v14
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v23
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v14
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 24, v22
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[5:6]
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v22
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v13
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v22
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v5
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v21
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v5
+; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[3:4]
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v21
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v4
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 24, v20
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v4
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v20
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v4
+; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[1:2]
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v20
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v16
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v19
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v3
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v16
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v19
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v2
+; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[31:32]
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v16
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 24, v18
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v15
+; VI-NEXT: v_lshrrev_b32_e32 v46, 24, v12
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v18
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v2
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v18
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v1
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[15:16]
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v1
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v17
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[13:14]
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[11:12]
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[9:10]
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[7:8]
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[5:6]
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[3:4]
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[1:2]
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[31:32]
-; VI-NEXT: v_lshrrev_b32_e32 v46, 24, v12
; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v15
+; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v18
; VI-NEXT: v_mov_b32_e32 v45, v46
; VI-NEXT: v_lshrrev_b64 v[46:47], 24, v[29:30]
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v14
+; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v17
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v14
; VI-NEXT: v_lshrrev_b32_e32 v63, 8, v11
; VI-NEXT: v_lshrrev_b32_e32 v50, 8, v31
+; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v17
; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v14
+; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v46, v63
; VI-NEXT: v_mov_b32_e32 v63, v50
; VI-NEXT: v_lshrrev_b64 v[50:51], 24, v[27:28]
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v13
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v13
; VI-NEXT: v_lshrrev_b32_e32 v56, 8, v10
; VI-NEXT: v_lshrrev_b32_e32 v57, 16, v9
; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v43, 24, v8
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v5
; VI-NEXT: v_mov_b32_e32 v51, v57
; VI-NEXT: v_mov_b32_e32 v50, v56
; VI-NEXT: v_lshrrev_b64 v[56:57], 24, v[25:26]
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v5
; VI-NEXT: v_mov_b32_e32 v57, v43
; VI-NEXT: v_lshrrev_b64 v[43:44], 24, v[23:24]
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v4
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v4
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v4
; VI-NEXT: v_lshrrev_b64 v[43:44], 24, v[21:22]
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v3
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v3
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v2
; VI-NEXT: v_lshrrev_b64 v[43:44], 24, v[19:20]
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v2
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v2
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v1
; VI-NEXT: v_lshrrev_b64 v[43:44], 24, v[17:18]
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v10
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v1
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v12
; VI-NEXT: v_lshrrev_b32_e32 v49, 8, v12
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v11
@@ -161885,6 +161744,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v54, 24, v6
; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v6
; VI-NEXT: v_lshrrev_b32_e32 v38, 8, v6
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v37, 24, v32
; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v32
; VI-NEXT: v_lshrrev_b32_e32 v48, 8, v32
@@ -162518,27 +162378,27 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[21:22]
+; VI-NEXT: v_lshrrev_b32_e32 v43, 24, v28
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[19:20]
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[17:18]
-; VI-NEXT: v_lshrrev_b32_e32 v43, 24, v28
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v28
+; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[19:20]
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v43, 8, v28
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v27
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v43, 8, v27
+; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[17:18]
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v43, 24, v26
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v26
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v43, 8, v26
; VI-NEXT: v_lshrrev_b32_e32 v33, 24, v16
@@ -163282,49 +163142,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v4
-; GFX9-NEXT: v_lshrrev_b32_e32 v31, 24, v8
-; GFX9-NEXT: v_lshrrev_b32_e32 v32, 24, v6
-; GFX9-NEXT: v_lshrrev_b32_e32 v58, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX9-NEXT: v_lshrrev_b32_e32 v54, 8, v11
-; GFX9-NEXT: v_lshrrev_b32_e32 v40, 8, v10
-; GFX9-NEXT: v_lshrrev_b32_e32 v39, 8, v8
-; GFX9-NEXT: v_lshrrev_b32_e32 v42, 8, v6
-; GFX9-NEXT: v_lshrrev_b32_e32 v52, 8, v4
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 8, v3
-; GFX9-NEXT: v_lshrrev_b32_e32 v53, 24, v2
-; GFX9-NEXT: v_lshrrev_b32_e32 v51, 8, v2
-; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v1
-; GFX9-NEXT: v_lshrrev_b32_e32 v41, 8, v1
-; GFX9-NEXT: v_lshrrev_b32_e32 v49, 8, v24
-; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v23
-; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v23
-; GFX9-NEXT: v_lshrrev_b32_e32 v37, 24, v22
-; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v22
-; GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v21
-; GFX9-NEXT: v_lshrrev_b32_e32 v55, 8, v21
-; GFX9-NEXT: v_lshrrev_b32_e32 v47, 8, v20
-; GFX9-NEXT: v_lshrrev_b32_e32 v45, 8, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 24, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v60, 8, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v61, 16, v17
-; GFX9-NEXT: v_lshrrev_b32_e32 v43, 8, v17
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v16
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v16
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v16
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v15
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v15
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v58, 16, v18
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v4
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v4
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
@@ -163338,6 +163160,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v63
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(44)
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v62
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v62
@@ -163355,130 +163178,168 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v28
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v28
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v14
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v28
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v14
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v27
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v13
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v27
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v13
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v26
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v12
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v12
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v26
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v12
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v11
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v25
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v10
+; GFX9-NEXT: v_lshrrev_b32_e32 v31, 24, v8
+; GFX9-NEXT: v_lshrrev_b32_e32 v32, 24, v6
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v24
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v10
; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v8
; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v6
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[15:16]
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v9
; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v7
; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v5
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v22
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v54, 8, v11
+; GFX9-NEXT: v_lshrrev_b32_e32 v40, 8, v10
+; GFX9-NEXT: v_lshrrev_b32_e32 v39, 8, v8
; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v31, 8, v7
+; GFX9-NEXT: v_lshrrev_b32_e32 v42, 8, v6
; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v32, 8, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v52, 8, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 8, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v53, 24, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v51, 8, v2
+; GFX9-NEXT: s_waitcnt vmcnt(35)
+; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[15:16]
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v16
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(35)
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[13:14]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[11:12]
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v16
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[9:10]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[7:8]
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v16
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[5:6]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[3:4]
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v15
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[1:2]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[62:63]
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v15
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[29:30]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[27:28]
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v14
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[25:26]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v14
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[23:24]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v14
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v13
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v13
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[21:22]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v12
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v12
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v12
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[19:20]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v11
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v10
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v10
; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[17:18]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v9
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v9
-; GFX9-NEXT: v_lshrrev_b32_e32 v31, 8, v7
-; GFX9-NEXT: v_lshrrev_b32_e32 v32, 8, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v41, 8, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v49, 8, v24
+; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v23
+; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v23
+; GFX9-NEXT: v_lshrrev_b32_e32 v37, 24, v22
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v22
+; GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v21
+; GFX9-NEXT: v_lshrrev_b32_e32 v55, 8, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v20
+; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v47, 8, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v45, 8, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 24, v18
+; GFX9-NEXT: v_lshrrev_b32_e32 v60, 8, v18
+; GFX9-NEXT: v_lshrrev_b32_e32 v61, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v43, 8, v17
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
@@ -163571,16 +163432,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; GFX9-NEXT: v_cndmask_b32_e32 v13, v18, v19, vcc
; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1
-; GFX9-NEXT: v_mov_b32_e32 v59, v32
; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6
; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v21
-; GFX9-NEXT: v_mov_b32_e32 v58, v31
; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; GFX9-NEXT: v_perm_b32 v14, v13, v0, s7
; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v19, vcc
@@ -163735,7 +163591,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6
; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
-; GFX9-NEXT: s_waitcnt vmcnt(52)
+; GFX9-NEXT: s_waitcnt vmcnt(50)
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v62
; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17
; GFX9-NEXT: v_cndmask_b32_e32 v44, v18, v19, vcc
@@ -163750,7 +163606,6 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6
; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
@@ -163891,8 +163746,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v9
; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; GFX9-NEXT: v_mov_b32_e32 v59, v32
; GFX9-NEXT: v_cndmask_b32_e32 v10, v2, v10, vcc
; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1
+; GFX9-NEXT: v_mov_b32_e32 v58, v31
; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6
; GFX9-NEXT: v_or_b32_e32 v31, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
@@ -163958,6 +163815,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_e32 v41, 0x400000, v31
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
; GFX9-NEXT: v_bfe_u32 v31, v13, 16, 1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; GFX9-NEXT: v_perm_b32 v61, v28, v0, s7
; GFX9-NEXT: v_cndmask_b32_e32 v0, v32, v41, vcc
; GFX9-NEXT: v_add3_u32 v31, v31, v13, s6
@@ -163965,7 +163823,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
; GFX9-NEXT: v_cndmask_b32_e32 v13, v31, v32, vcc
; GFX9-NEXT: v_perm_b32 v41, v13, v0, s7
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshlrev_b32_e32 v13, 16, v16
; GFX9-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; GFX9-NEXT: v_bfe_u32 v31, v13, 16, 1
@@ -163994,24 +163852,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_e32 v45, 0x400000, v15
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
; GFX9-NEXT: v_cndmask_b32_e32 v15, v31, v45, vcc
+; GFX9-NEXT: v_perm_b32 v32, v16, v13, s7
; GFX9-NEXT: v_perm_b32 v31, v15, v26, s7
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v16
-; GFX9-NEXT: v_perm_b32 v32, v16, v13, s7
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v14
-; GFX9-NEXT: v_perm_b32 v42, v14, v11, s7
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v12
-; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v10
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
@@ -164031,12 +163879,19 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; GFX9-NEXT: v_perm_b32 v34, v30, v27, s7
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v27
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v25
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: v_perm_b32 v36, v44, v29, s7
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v29
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v23
+; GFX9-NEXT: v_perm_b32 v42, v14, v11, s7
+; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v22
; GFX9-NEXT: v_perm_b32 v38, v21, v43, s7
@@ -164045,6 +163900,24 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v5
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v4
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v20
+; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_perm_b32 v55, v12, v9, s7
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v9
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v43
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
; GFX9-NEXT: v_perm_b32 v51, v6, v17, s7
; GFX9-NEXT: v_perm_b32 v40, v10, v7, s7
; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v7
@@ -164052,12 +163925,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v57
-; GFX9-NEXT: v_perm_b32 v55, v12, v9, s7
-; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v9
-; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v2
-; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v43
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v47
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v0
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -164101,7 +163970,6 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v56
; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[31:32]
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
@@ -164134,74 +164002,51 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[35:36]
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v32
; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[33:34]
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[60:61]
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v32
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[13:14]
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[62:63]
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[43:44]
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[58:59]
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[58:59]
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v32
+; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[33:34]
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v31
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v31
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v42
+; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[60:61]
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v42
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v41
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v41
+; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[13:14]
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v55
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v55
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v54
+; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[62:63]
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v40
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v39
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 24, v53
@@ -164214,15 +164059,26 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 8, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 24, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 8, v36
+; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[43:44]
; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 8, v35
; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 24, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 8, v34
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v33
+; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[58:59]
+; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v61
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
@@ -164231,6 +164087,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v60
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v60
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GFX9-NEXT: v_lshrrev_b32_e32 v54, 8, v54
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v14
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
@@ -164255,31 +164114,33 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v52, 8, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v48
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; GFX9-NEXT: v_mov_b32_e32 v63, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v35
+; GFX9-NEXT: v_lshrrev_b32_e32 v40, 8, v40
; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 8, v48
; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v63, v16
-; GFX9-NEXT: v_lshrrev_b32_e32 v54, 8, v54
-; GFX9-NEXT: v_lshrrev_b32_e32 v40, 8, v40
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 8, v48
; GFX9-NEXT: v_mov_b32_e32 v62, v15
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 8, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v34
-; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 24, v44
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v44
-; GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v43
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 8, v43
+; GFX9-NEXT: s_waitcnt vmcnt(24)
+; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[58:59]
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v34
+; GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v43
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 24, v59
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 8, v58
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(19)
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v60
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v61
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 8, v61
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 8, v60
@@ -164294,6 +164155,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v10, 8, v54
; GFX9-NEXT: v_or_b32_sdwa v10, v11, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshlrev_b16_e32 v32, 8, v32
; GFX9-NEXT: v_lshlrev_b16_e32 v31, 8, v31
; GFX9-NEXT: v_or_b32_sdwa v5, v5, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -164302,6 +164167,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v31, 8, v39
; GFX9-NEXT: v_or_b32_sdwa v6, v6, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v8, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshlrev_b16_e32 v41, 8, v41
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b16_e32 v51, 8, v51
@@ -164310,38 +164177,29 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_sdwa v3, v3, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b16_e32 v48, 8, v52
; GFX9-NEXT: v_or_b32_sdwa v4, v4, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b16_e32 v11, 8, v11
; GFX9-NEXT: v_or_b32_sdwa v11, v12, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_lshlrev_b16_e32 v12, 8, v12
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v12, v13, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_lshlrev_b16_e32 v13, 8, v13
; GFX9-NEXT: v_or_b32_sdwa v13, v14, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_lshlrev_b16_e32 v14, 8, v14
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v14, v15, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_lshlrev_b16_e32 v15, 8, v15
; GFX9-NEXT: v_or_b32_sdwa v15, v16, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b16_e32 v16, 8, v43
; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b16_e32 v17, 8, v60
; GFX9-NEXT: v_or_b32_sdwa v17, v18, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshlrev_b16_e32 v18, 8, v31
; GFX9-NEXT: v_or_b32_sdwa v18, v38, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -164679,7 +164537,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v128i8:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x2
+; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:8
+; GFX11-TRUE16-NEXT: scratch_load_b32 v81, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v80, off, s32
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:240
@@ -164712,7 +164574,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1b
+; GFX11-TRUE16-NEXT: s_clause 0x1b ; 112-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:112
@@ -164741,10 +164603,6 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:20
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:16
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v155, s32 offset:12
-; GFX11-TRUE16-NEXT: s_clause 0x2
-; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:8
-; GFX11-TRUE16-NEXT: scratch_load_b32 v81, off, s32 offset:4
-; GFX11-TRUE16-NEXT: scratch_load_b32 v80, off, s32
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr181_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
@@ -165778,7 +165636,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v155, off, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:16
; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:20
@@ -165811,7 +165669,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:136
-; GFX11-TRUE16-NEXT: s_clause 0x1b
+; GFX11-TRUE16-NEXT: s_clause 0x1b ; 112-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:140
; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:144
; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:148
@@ -165846,7 +165704,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v64bf16_to_v128i8:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x15
+; GFX11-FAKE16-NEXT: s_clause 0x2
+; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-FAKE16-NEXT: s_clause 0x15 ; 88-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:96
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:92
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:88
@@ -165869,10 +165731,6 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:20
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v76, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v77, s32 offset:12
-; GFX11-FAKE16-NEXT: s_clause 0x2
-; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
-; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr76
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr75
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr66
@@ -166991,7 +166849,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112
-; GFX11-FAKE16-NEXT: s_clause 0x15
+; GFX11-FAKE16-NEXT: s_clause 0x15 ; 88-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v77, off, s32 offset:12
; GFX11-FAKE16-NEXT: scratch_load_b32 v76, off, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:20
@@ -169924,6 +169782,15 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v43, s4
; VI-NEXT: v_readlane_b32 s4, v62, 13
; VI-NEXT: v_mov_b32_e32 v46, s4
+; VI-NEXT: v_mov_b32_e32 v45, s72
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v45, s74
+; VI-NEXT: v_mov_b32_e32 v42, s54
+; VI-NEXT: v_mov_b32_e32 v41, s46
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v41, s56
; VI-NEXT: v_readlane_b32 s4, v62, 14
; VI-NEXT: v_mov_b32_e32 v50, s4
; VI-NEXT: v_readlane_b32 s4, v62, 15
@@ -169949,6 +169816,11 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_readlane_b32 s4, v62, 22
; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v55, s4
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v45, s76
; VI-NEXT: v_readlane_b32 s4, v62, 23
; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v55, s4
@@ -169994,6 +169866,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_readlane_b32 s4, v62, 37
; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v55, s4
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_readlane_b32 s4, v62, 38
; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v55, s4
@@ -170052,45 +169926,47 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v55, s4
; VI-NEXT: v_readlane_b32 s4, v62, 57
-; VI-NEXT: v_mov_b32_e32 v42, s54
; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v55, s4
-; VI-NEXT: v_mov_b32_e32 v41, s46
+; VI-NEXT: v_mov_b32_e32 v36, s66
; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v41, s56
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v41, s58
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v41, s60
-; VI-NEXT: v_mov_b32_e32 v45, s72
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v45, s74
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v45, s76
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v45, s78
; VI-NEXT: v_mov_b32_e32 v55, s88
+; VI-NEXT: v_mov_b32_e32 v35, s30
+; VI-NEXT: v_mov_b32_e32 v41, s58
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v36, s66
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v35, s85
+; VI-NEXT: v_mov_b32_e32 v34, s38
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v52, s64
-; VI-NEXT: v_mov_b32_e32 v55, v50
-; VI-NEXT: v_mov_b32_e32 v35, s30
; VI-NEXT: v_mov_b32_e32 v59, s87
+; VI-NEXT: v_mov_b32_e32 v41, s60
+; VI-NEXT: v_mov_b32_e32 v55, v50
; VI-NEXT: v_mov_b32_e32 v58, s34
; VI-NEXT: v_mov_b32_e32 v45, s36
-; VI-NEXT: v_mov_b32_e32 v34, s38
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v50, v46
+; VI-NEXT: v_mov_b32_e32 v46, v48
+; VI-NEXT: v_mov_b32_e32 v48, v47
+; VI-NEXT: v_mov_b32_e32 v47, v56
+; VI-NEXT: v_mov_b32_e32 v56, v51
+; VI-NEXT: v_mov_b32_e32 v51, s90
+; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v34, s48
; VI-NEXT: v_mov_b32_e32 v1, s44
; VI-NEXT: v_mov_b32_e32 v2, s45
; VI-NEXT: v_mov_b32_e32 v3, s42
@@ -170123,37 +169999,19 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v30, s29
; VI-NEXT: v_mov_b32_e32 v32, s5
; VI-NEXT: v_mov_b32_e32 v41, s62
+; VI-NEXT: v_mov_b32_e32 v51, v53
+; VI-NEXT: v_mov_b32_e32 v53, v54
+; VI-NEXT: v_mov_b32_e32 v54, v40
+; VI-NEXT: v_mov_b32_e32 v40, s80
; VI-NEXT: v_mov_b32_e32 v57, s81
; VI-NEXT: v_mov_b32_e32 v37, s84
+; VI-NEXT: v_mov_b32_e32 v58, s50
; VI-NEXT: v_mov_b32_e32 v60, s52
; VI-NEXT: v_mov_b32_e32 v38, s51
; VI-NEXT: v_mov_b32_e32 v61, s65
; VI-NEXT: v_mov_b32_e32 v49, s66
-; VI-NEXT: v_mov_b32_e32 v39, s55
-; VI-NEXT: v_mov_b32_e32 v50, v46
-; VI-NEXT: v_mov_b32_e32 v46, v48
-; VI-NEXT: v_mov_b32_e32 v48, v47
-; VI-NEXT: v_mov_b32_e32 v47, v56
-; VI-NEXT: v_mov_b32_e32 v56, v51
-; VI-NEXT: v_mov_b32_e32 v51, s90
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v35, s85
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v34, s48
-; VI-NEXT: v_mov_b32_e32 v51, v53
-; VI-NEXT: v_mov_b32_e32 v53, v54
-; VI-NEXT: v_mov_b32_e32 v54, v40
-; VI-NEXT: v_mov_b32_e32 v40, s80
-; VI-NEXT: v_mov_b32_e32 v58, s50
; VI-NEXT: v_mov_b32_e32 v45, s53
+; VI-NEXT: v_mov_b32_e32 v39, s55
; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; VI-NEXT: .LBB91_5: ; %end
@@ -172194,7 +172052,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_or_saveexec_b32 s4, -1
-; GFX11-TRUE16-NEXT: s_clause 0x3
+; GFX11-TRUE16-NEXT: s_clause 0x3 ; 16-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:8
@@ -173744,7 +173602,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-TRUE16-NEXT: v_readlane_b32 s31, v40, 1
; GFX11-TRUE16-NEXT: v_readlane_b32 s30, v40, 0
; GFX11-TRUE16-NEXT: s_or_saveexec_b32 s0, -1
-; GFX11-TRUE16-NEXT: s_clause 0x3
+; GFX11-TRUE16-NEXT: s_clause 0x3 ; 16-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:8
@@ -173757,7 +173615,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_or_saveexec_b32 s4, -1
-; GFX11-FAKE16-NEXT: s_clause 0x3
+; GFX11-FAKE16-NEXT: s_clause 0x3 ; 16-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:4
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:8
@@ -175314,7 +175172,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-FAKE16-NEXT: v_readlane_b32 s31, v40, 1
; GFX11-FAKE16-NEXT: v_readlane_b32 s30, v40, 0
; GFX11-FAKE16-NEXT: s_or_saveexec_b32 s0, -1
-; GFX11-FAKE16-NEXT: s_clause 0x3
+; GFX11-FAKE16-NEXT: s_clause 0x3 ; 16-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32
; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:4
; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:8
@@ -175488,9 +175346,6 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v37
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:160
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:168
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:176
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v38
@@ -175508,6 +175363,9 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:220
; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:192
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:200
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:160
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:168
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:176
; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; implicit-def: $vgpr37
@@ -175525,15 +175383,15 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(4) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v6
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:208
@@ -175669,34 +175527,37 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:360
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v4
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr6
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:368
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:388
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:384
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:360
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
+; SI-NEXT: ; implicit-def: $vgpr7
+; SI-NEXT: ; implicit-def: $vgpr6
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:48
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:88
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:80
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:72
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:56
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40
@@ -175716,7 +175577,10 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:368
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:120
@@ -175726,7 +175590,9 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
+; SI-NEXT: ; implicit-def: $vgpr11
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:216
@@ -175752,14 +175618,6 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:376
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:88
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:80
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:56
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
@@ -175882,7 +175740,6 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v2, 0xff, v47
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v6, 0xff, v42
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_and_b32_e32 v18, 0xff, v18
; SI-NEXT: v_and_b32_e32 v22, 0xff, v22
; SI-NEXT: v_and_b32_e32 v24, 0xff, v24
@@ -176540,25 +176397,18 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v20, 0xff, v20
; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16
; SI-NEXT: v_and_b32_e32 v16, 0xff, v16
-; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_add_i32_e32 v34, vcc, 3, v34
; SI-NEXT: v_and_b32_e32 v34, 0xff, v34
-; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30
; SI-NEXT: v_and_b32_e32 v30, 0xff, v30
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28
; SI-NEXT: v_and_b32_e32 v28, 0xff, v28
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26
; SI-NEXT: v_and_b32_e32 v26, 0xff, v26
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
; SI-NEXT: v_and_b32_e32 v24, 0xff, v24
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22
; SI-NEXT: v_and_b32_e32 v22, 0xff, v22
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18
; SI-NEXT: v_and_b32_e32 v18, 0xff, v18
; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4
@@ -177657,8 +177507,8 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v25
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v29
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v55, 8, v3
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v40, 8, v5
; VI-NEXT: v_lshlrev_b16_e32 v41, 8, v7
; VI-NEXT: v_lshlrev_b16_e32 v50, 8, v9
@@ -177754,13 +177604,25 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:52
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -177888,14 +177750,19 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v38, 8, v0
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_lshlrev_b16_e32 v39, 8, v1
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v49, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v51, 8, v3
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:356
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_lshlrev_b16_e32 v51, 8, v3
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b16_e32 v53, 8, v0
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -177903,26 +177770,6 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v53, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:52
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -177931,35 +177778,57 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_or_b32_sdwa v0, v0, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: s_waitcnt vmcnt(12)
; VI-NEXT: v_or_b32_sdwa v1, v1, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(11)
; VI-NEXT: v_or_b32_sdwa v3, v3, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v2, v2, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_or_b32_sdwa v2, v2, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_or_b32_sdwa v4, v4, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_or_b32_sdwa v10, v61, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v5, v5, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v11, v57, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_or_b32_sdwa v6, v6, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v12, v59, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v13, v47, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v14, v45, v14 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v15, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: ; implicit-def: $vgpr61
+; VI-NEXT: ; implicit-def: $vgpr57
+; VI-NEXT: ; implicit-def: $vgpr59
+; VI-NEXT: ; implicit-def: $vgpr47
+; VI-NEXT: ; implicit-def: $vgpr45
+; VI-NEXT: ; implicit-def: $vgpr43
; VI-NEXT: ; implicit-def: $vgpr54
; VI-NEXT: ; implicit-def: $vgpr55
; VI-NEXT: ; implicit-def: $vgpr40
; VI-NEXT: ; implicit-def: $vgpr41
; VI-NEXT: ; implicit-def: $vgpr48
; VI-NEXT: ; implicit-def: $vgpr36
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v6, v6, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: ; implicit-def: $vgpr34
+; VI-NEXT: v_or_b32_sdwa v31, v31, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: ; implicit-def: $vgpr49
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v2, v2, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -177992,39 +177861,19 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v6, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: ; implicit-def: $vgpr53
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_or_b32_sdwa v10, v61, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_or_b32_sdwa v11, v57, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_or_b32_sdwa v12, v59, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_or_b32_sdwa v13, v47, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v14, v45, v14 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr61
-; VI-NEXT: ; implicit-def: $vgpr57
-; VI-NEXT: ; implicit-def: $vgpr59
-; VI-NEXT: ; implicit-def: $vgpr47
-; VI-NEXT: ; implicit-def: $vgpr45
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v15, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr43
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v8, v62, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -178190,17 +178039,9 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v28, v28, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v29, v29, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v31, v31, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr49
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr53
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v30, v30, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v29, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -178885,8 +178726,8 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v27
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v29
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v54, 8, v3
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v41, 8, v5
; GFX9-NEXT: v_lshlrev_b16_e32 v40, 8, v7
; GFX9-NEXT: v_lshlrev_b16_e32 v51, 8, v9
@@ -178997,13 +178838,27 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(16)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -179136,14 +178991,19 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v37, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_lshlrev_b16_e32 v49, 8, v1
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_lshlrev_b16_e32 v48, 8, v2
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v52, 8, v3
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:356
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v52, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b16_e32 v53, 8, v0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -179151,26 +179011,6 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v53, 8, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -179179,36 +179019,62 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; GFX9-NEXT: s_mov_b32 s6, 0x5040100
+; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v55 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
+; GFX9-NEXT: s_waitcnt vmcnt(14)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_or_b32_sdwa v2, v2, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v3, v3, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v0, v1, v0, s6
+; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: v_or_b32_sdwa v3, v3, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v1, v3, v2, s6
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(10)
+; GFX9-NEXT: v_or_b32_sdwa v10, v60, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v11, v56, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v4, v4, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_or_b32_sdwa v12, v58, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v5, v5, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v13, v46, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v6, v6, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v14, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v31, v31, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: ; implicit-def: $vgpr60
+; GFX9-NEXT: ; implicit-def: $vgpr56
+; GFX9-NEXT: ; implicit-def: $vgpr58
+; GFX9-NEXT: ; implicit-def: $vgpr46
+; GFX9-NEXT: ; implicit-def: $vgpr44
; GFX9-NEXT: ; implicit-def: $vgpr55
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: ; implicit-def: $vgpr41
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr38
; GFX9-NEXT: ; implicit-def: $vgpr35
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v6, v6, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: ; implicit-def: $vgpr33
+; GFX9-NEXT: ; implicit-def: $vgpr48
+; GFX9-NEXT: v_or_b32_sdwa v15, v42, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: ; implicit-def: $vgpr42
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v2, v2, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -179231,49 +179097,25 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_sdwa v5, v5, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v5, v6, v5, s6
; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
; GFX9-NEXT: ; implicit-def: $vgpr34
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v6, v6, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v6, v6, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v6, v7, v6, s6
; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: ; implicit-def: $vgpr53
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v7, v8, v7, s6
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_or_b32_sdwa v10, v60, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_or_b32_sdwa v11, v56, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_or_b32_sdwa v12, v58, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v13, v46, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v14, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: ; implicit-def: $vgpr60
-; GFX9-NEXT: ; implicit-def: $vgpr56
-; GFX9-NEXT: ; implicit-def: $vgpr58
-; GFX9-NEXT: ; implicit-def: $vgpr46
-; GFX9-NEXT: ; implicit-def: $vgpr44
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v15, v42, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: ; implicit-def: $vgpr42
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v8, v63, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -179439,17 +179281,9 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v28, v29, v28, s6
; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v29, v29, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v31, v31, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: ; implicit-def: $vgpr48
-; GFX9-NEXT: ; implicit-def: $vgpr53
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v30, v30, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v29, v30, v29, s6
@@ -180795,7 +180629,7 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64f16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:580
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:576
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:572
@@ -180828,7 +180662,7 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:464
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:460
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:456
-; GFX11-FAKE16-NEXT: s_clause 0xf
+; GFX11-FAKE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:452
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:448
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:444
@@ -181657,7 +181491,7 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: v_perm_b32 v31, v116, v31, 0x5040100
; GFX11-FAKE16-NEXT: .LBB92_4: ; %end
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:392
; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:396
; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:400
@@ -181690,7 +181524,7 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:508
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:512
; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:516
-; GFX11-FAKE16-NEXT: s_clause 0xf
+; GFX11-FAKE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:520
; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:524
; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:528
@@ -183515,33 +183349,53 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v24
; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v26
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:180
+; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:188
+; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:292
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:324
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v16
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v18
; VI-NEXT: v_lshlrev_b32_e32 v20, 8, v20
; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v22
-; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v36, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
@@ -183586,52 +183440,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:68
; VI-NEXT: s_waitcnt vmcnt(10)
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:76
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:84
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:92
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:100
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:108
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:116
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:172
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260
-; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:276
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:308
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:324
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
@@ -183651,6 +183459,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
@@ -183659,7 +183468,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
@@ -183691,6 +183499,25 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:76
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:84
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:92
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:100
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:108
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:116
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB93_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
@@ -183715,15 +183542,18 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_or_b32_sdwa v2, v2, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v2, v8
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
@@ -183773,10 +183603,11 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -183784,50 +183615,37 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_or_b32_sdwa v1, v48, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v49, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v1, v60, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v2, v35, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(9)
-; VI-NEXT: v_or_b32_sdwa v3, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_or_b32_sdwa v0, v42, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v1, v41, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v39, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v42, v43
; VI-NEXT: v_mov_b32_e32 v43, v37
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
@@ -183842,13 +183660,12 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v1, v24, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v34, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
@@ -183870,21 +183687,28 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v31, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v54, v33
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v56, v1
; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v2, v35, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v3, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v63, v39
+; VI-NEXT: v_mov_b32_e32 v54, v33
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_mov_b32_e32 v57, v0
; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -183902,11 +183726,10 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v53, v35
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -183939,7 +183762,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: s_branch .LBB93_3
; VI-NEXT: .LBB93_2:
; VI-NEXT: v_mov_b32_e32 v47, v54
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
@@ -183960,6 +183782,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v58, v7
; VI-NEXT: v_mov_b32_e32 v57, v5
; VI-NEXT: v_mov_b32_e32 v56, v3
@@ -184551,29 +184374,51 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
+; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:292
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:324
; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v46
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: s_waitcnt vmcnt(29)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
@@ -184637,82 +184482,42 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:124
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:132
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:140
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:148
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:164
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(22)
; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(24)
; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(31)
; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(35)
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(35)
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
@@ -184733,6 +184538,13 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(55)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:140
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:148
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
@@ -184986,14 +184798,13 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v30, v1, 16, v0
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v31, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
@@ -185003,7 +184814,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: .LBB93_2:
; GFX9-NEXT: v_mov_b32_e32 v58, v50
; GFX9-NEXT: v_mov_b32_e32 v45, v59
-; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
@@ -185015,6 +184825,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v34, v35
+; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v49, v39
@@ -185480,7 +185291,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-LABEL: bitcast_v128i8_to_v64f16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0x1e
+; GFX11-TRUE16-NEXT: s_clause 0x1e ; 124-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:440
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:436
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:432
@@ -186210,7 +186021,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.h, v182.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v43.l
; GFX11-TRUE16-NEXT: .LBB93_3: ; %end
-; GFX11-TRUE16-NEXT: s_clause 0x1e
+; GFX11-TRUE16-NEXT: s_clause 0x1e ; 124-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:324
; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:328
@@ -186252,7 +186063,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64f16_scalar:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1e
+; GFX11-FAKE16-NEXT: s_clause 0x1e ; 124-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:440
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:436
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:432
@@ -187036,7 +186847,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v36
; GFX11-FAKE16-NEXT: .LBB93_3: ; %end
-; GFX11-FAKE16-NEXT: s_clause 0x1e
+; GFX11-FAKE16-NEXT: s_clause 0x1e ; 124-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:324
; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:328
@@ -189098,27 +188909,42 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v62, 16, v4
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v44, v12
; VI-NEXT: v_mov_b32_e32 v12, v0
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32
+; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v8
+; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v32, v20
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v16
; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v43, v11
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v40, 16, v10
-; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; VI-NEXT: v_lshrrev_b32_e32 v62, 16, v4
-; VI-NEXT: v_mov_b32_e32 v32, v20
+; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v55, v22
; VI-NEXT: v_mov_b32_e32 v54, v21
; VI-NEXT: v_mov_b32_e32 v31, v19
+; VI-NEXT: v_mov_b32_e32 v43, v11
; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v44
+; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v43
; VI-NEXT: ; implicit-def: $vgpr20
; VI-NEXT: ; implicit-def: $vgpr57
; VI-NEXT: ; implicit-def: $vgpr51
+; VI-NEXT: ; implicit-def: $vgpr8
+; VI-NEXT: ; implicit-def: $vgpr4
; VI-NEXT: ; implicit-def: $vgpr41
; VI-NEXT: ; implicit-def: $vgpr56
; VI-NEXT: ; implicit-def: $vgpr63
@@ -189130,47 +188956,38 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: ; implicit-def: $vgpr42
; VI-NEXT: ; implicit-def: $vgpr45
; VI-NEXT: ; implicit-def: $vgpr52
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v30
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v32
-; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v15
-; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v43
-; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v31
-; VI-NEXT: ; implicit-def: $vgpr8
-; VI-NEXT: ; implicit-def: $vgpr15
-; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v13
-; VI-NEXT: v_lshrrev_b32_e32 v19, 16, v9
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v18
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v34
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v7
; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v6
; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v15
+; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v13
+; VI-NEXT: v_lshrrev_b32_e32 v19, 16, v9
+; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v31
+; VI-NEXT: ; implicit-def: $vgpr15
; VI-NEXT: ; implicit-def: $vgpr13
; VI-NEXT: ; implicit-def: $vgpr9
-; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; VI-NEXT: ; implicit-def: $vgpr5
-; VI-NEXT: ; implicit-def: $vgpr4
-; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v30
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr30
+; VI-NEXT: ; implicit-def: $vgpr34
; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v29
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v28
@@ -189179,38 +188996,6 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr29
; VI-NEXT: ; implicit-def: $vgpr28
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v27
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v26
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; VI-NEXT: ; implicit-def: $vgpr27
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v25
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v24
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v18
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v34
-; VI-NEXT: ; implicit-def: $vgpr25
-; VI-NEXT: ; implicit-def: $vgpr24
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v23
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v55
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v54
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; VI-NEXT: ; implicit-def: $vgpr23
-; VI-NEXT: ; implicit-def: $vgpr34
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v33
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr0
@@ -189254,8 +189039,34 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: ; implicit-def: $vgpr0
; VI-NEXT: ; kill: killed $vgpr0
; VI-NEXT: ; implicit-def: $vgpr0
+; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; VI-NEXT: ; kill: killed $vgpr0
; VI-NEXT: ; implicit-def: $vgpr0
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; VI-NEXT: ; implicit-def: $vgpr5
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v27
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v26
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; VI-NEXT: ; implicit-def: $vgpr27
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v25
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v24
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; VI-NEXT: ; implicit-def: $vgpr25
+; VI-NEXT: ; implicit-def: $vgpr24
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v23
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v55
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v54
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; VI-NEXT: ; implicit-def: $vgpr23
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; VI-NEXT: ; implicit-def: $vgpr10
@@ -189293,28 +189104,49 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v56, v38
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v45, v7
-; VI-NEXT: v_mov_b32_e32 v63, v53
+; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v15, v3
+; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v28, v48
; VI-NEXT: v_mov_b32_e32 v48, v16
; VI-NEXT: v_mov_b32_e32 v16, v40
; VI-NEXT: v_mov_b32_e32 v47, v39
+; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v63, v53
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v32
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v31
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v18
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v17
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v29, 24, v44
; VI-NEXT: v_lshrrev_b32_e32 v5, 24, v32
; VI-NEXT: v_lshrrev_b32_e32 v13, 24, v18
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshrrev_b32_e32 v2, 24, v1
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v1
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v0
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[0:1]
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v38
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v37
@@ -189326,83 +189158,20 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v62, v36
-; VI-NEXT: v_lshrrev_b32_e32 v41, 24, v38
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v11
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v10
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; VI-NEXT: v_lshrrev_b32_e32 v8, 24, v11
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshrrev_b32_e32 v23, 8, v6
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v2, 24, v7
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v7
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshrrev_b32_e32 v24, 8, v52
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_lshrrev_b32_e32 v57, 24, v53
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshrrev_b32_e32 v4, 24, v3
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; VI-NEXT: v_lshrrev_b32_e32 v20, 8, v53
-; VI-NEXT: v_lshrrev_b32_e32 v19, 8, v2
-; VI-NEXT: v_lshrrev_b32_e32 v25, 8, v3
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshrrev_b32_e32 v4, 24, v59
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v59
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v58
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v26
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshrrev_b32_e32 v14, 24, v27
-; VI-NEXT: v_lshrrev_b32_e32 v60, 8, v27
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshrrev_b32_e32 v9, 8, v33
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v34
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v62, v36
; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; VI-NEXT: v_lshrrev_b32_e32 v42, 24, v34
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshrrev_b32_e32 v22, 8, v35
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshrrev_b32_e32 v9, 24, v36
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v31
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v18
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v17
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[0:1]
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[37:38]
@@ -189417,61 +189186,94 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[6:7]
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[2:3]
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[26:27]
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[33:34]
-; VI-NEXT: v_lshrrev_b32_e32 v46, 8, v36
-; VI-NEXT: v_lshrrev_b64 v[37:38], 24, v[35:36]
+; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: v_lshrrev_b32_e32 v9, 8, v33
+; VI-NEXT: v_lshrrev_b32_e32 v41, 24, v38
+; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v30, 8, v50
+; VI-NEXT: v_lshrrev_b32_e32 v21, 8, v49
+; VI-NEXT: v_lshrrev_b32_e32 v51, 24, v40
+; VI-NEXT: v_lshrrev_b32_e32 v8, 24, v11
+; VI-NEXT: v_lshrrev_b32_e32 v57, 24, v53
+; VI-NEXT: v_lshrrev_b32_e32 v20, 8, v53
+; VI-NEXT: v_lshrrev_b32_e32 v24, 8, v52
+; VI-NEXT: v_lshrrev_b32_e32 v14, 24, v27
+; VI-NEXT: v_lshrrev_b32_e32 v42, 24, v34
; VI-NEXT: v_lshrrev_b64 v[10:11], 24, v[52:53]
; VI-NEXT: v_lshrrev_b64 v[52:53], 24, v[58:59]
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32]
+; VI-NEXT: v_lshrrev_b32_e32 v60, 8, v27
; VI-NEXT: v_mov_b32_e32 v53, v63
-; VI-NEXT: v_mov_b32_e32 v27, v19
-; VI-NEXT: v_mov_b32_e32 v34, v14
-; VI-NEXT: v_lshrrev_b32_e32 v9, 24, v55
+; VI-NEXT: v_lshrrev_b32_e32 v63, 8, v40
+; VI-NEXT: v_lshrrev_b32_e32 v23, 8, v6
; VI-NEXT: v_mov_b32_e32 v7, v45
; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v31
-; VI-NEXT: v_mov_b32_e32 v3, v15
-; VI-NEXT: v_mov_b32_e32 v15, v29
-; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v17
-; VI-NEXT: v_mov_b32_e32 v38, v56
-; VI-NEXT: v_mov_b32_e32 v29, v41
; VI-NEXT: v_mov_b32_e32 v45, v60
-; VI-NEXT: v_lshrrev_b32_e32 v41, 8, v55
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshrrev_b32_e32 v21, 8, v49
+; VI-NEXT: v_lshrrev_b32_e32 v19, 8, v2
+; VI-NEXT: s_waitcnt vmcnt(13)
+; VI-NEXT: v_lshrrev_b32_e32 v4, 24, v3
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v4, 24, v59
+; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[2:3]
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v59
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v58
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v26
+; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[26:27]
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v34
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: v_lshrrev_b32_e32 v9, 24, v36
+; VI-NEXT: v_lshrrev_b32_e32 v46, 8, v36
+; VI-NEXT: v_lshrrev_b32_e32 v22, 8, v35
; VI-NEXT: v_lshrrev_b32_e32 v4, 24, v50
-; VI-NEXT: v_lshrrev_b32_e32 v30, 8, v50
-; VI-NEXT: v_lshrrev_b32_e32 v51, 24, v40
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b64 v[37:38], 24, v[35:36]
; VI-NEXT: v_lshrrev_b64 v[35:36], 24, v[49:50]
; VI-NEXT: v_lshrrev_b64 v[49:50], 24, v[39:40]
; VI-NEXT: v_mov_b32_e32 v58, v51
+; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[33:34]
; VI-NEXT: v_mov_b32_e32 v36, v62
; VI-NEXT: v_lshrrev_b64 v[61:62], 24, v[54:55]
+; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32]
; VI-NEXT: v_lshrrev_b64 v[50:51], 24, v[17:18]
-; VI-NEXT: v_lshrrev_b32_e32 v63, 8, v40
-; VI-NEXT: v_mov_b32_e32 v40, v16
-; VI-NEXT: v_mov_b32_e32 v16, v48
-; VI-NEXT: v_mov_b32_e32 v48, v28
-; VI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v27, v19
+; VI-NEXT: v_mov_b32_e32 v34, v14
; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v40, v16
+; VI-NEXT: v_mov_b32_e32 v16, v48
+; VI-NEXT: v_mov_b32_e32 v48, v28
+; VI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: v_lshrrev_b32_e32 v25, 8, v3
+; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v9, 24, v55
+; VI-NEXT: v_mov_b32_e32 v3, v15
+; VI-NEXT: v_mov_b32_e32 v15, v29
+; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v17
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v38, v56
; VI-NEXT: v_lshrrev_b32_e32 v56, 8, v39
+; VI-NEXT: v_mov_b32_e32 v29, v41
; VI-NEXT: v_mov_b32_e32 v39, v47
; VI-NEXT: v_mov_b32_e32 v47, v4
; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v54
+; VI-NEXT: v_lshrrev_b32_e32 v41, 8, v55
; VI-NEXT: .LBB94_2: ; %Flow
; VI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; VI-NEXT: s_cbranch_execz .LBB94_4
; VI-NEXT: ; %bb.3: ; %cmp.true
+; VI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v63, 0x200
; VI-NEXT: v_add_f16_sdwa v21, v18, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v21
@@ -189490,36 +189292,47 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; VI-NEXT: v_add_f16_e32 v31, 0x200, v31
; VI-NEXT: v_add_f16_sdwa v23, v55, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(9)
; VI-NEXT: v_or_b32_e32 v14, v31, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v23
; VI-NEXT: v_add_f16_e32 v55, 0x200, v55
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_or_b32_e32 v62, v55, v0
; VI-NEXT: v_add_f16_sdwa v0, v54, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; VI-NEXT: v_add_f16_e32 v54, 0x200, v54
; VI-NEXT: v_or_b32_e32 v61, v54, v0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v26, v54
; VI-NEXT: v_mov_b32_e32 v27, v55
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(10)
; VI-NEXT: v_add_f16_sdwa v60, v25, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v60
; VI-NEXT: v_add_f16_e32 v25, 0x200, v25
; VI-NEXT: v_or_b32_e32 v34, v25, v0
; VI-NEXT: v_add_f16_sdwa v0, v24, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v24, 0x200, v24
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_add_f16_sdwa v11, v7, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_add_f16_e32 v7, 0x200, v7
+; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: v_add_f16_sdwa v13, v54, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_add_f16_e32 v54, 0x200, v54
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; VI-NEXT: v_or_b32_e32 v33, v24, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_f16_sdwa v0, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
@@ -189527,13 +189340,21 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_or_b32_e32 v36, v2, v0
; VI-NEXT: v_add_f16_sdwa v0, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v1, 0x200, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; VI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_e32 v35, v1, v0
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_add_f16_sdwa v19, v24, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_add_f16_sdwa v42, v25, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_add_f16_e32 v25, 0x200, v25
+; VI-NEXT: v_add_f16_e32 v24, 0x200, v24
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_f16_sdwa v0, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
@@ -189542,38 +189363,34 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_or_b32_e32 v38, v2, v0
; VI-NEXT: v_add_f16_sdwa v0, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v1, 0x200, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_e32 v37, v1, v0
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_f16_sdwa v1, v8, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_f16_sdwa v0, v9, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v9, 0x200, v9
+; VI-NEXT: v_add_f16_sdwa v1, v8, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v8, 0x200, v8
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; VI-NEXT: v_or_b32_e32 v49, v9, v0
-; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_add_f16_sdwa v47, v3, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v3, 0x200, v3
+; VI-NEXT: v_or_b32_e32 v49, v9, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v1
; VI-NEXT: v_add_f16_sdwa v1, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v2, 0x200, v2
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: v_or_b32_e32 v48, v8, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v47
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v9, v31
; VI-NEXT: v_add_f16_sdwa v8, v43, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v10, v32
@@ -189591,11 +189408,11 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_add_f16_e32 v2, 0x200, v2
; VI-NEXT: v_add_f16_e32 v1, 0x200, v1
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; VI-NEXT: v_or_b32_e32 v53, v2, v0
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v3
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; VI-NEXT: v_add_f16_sdwa v3, v44, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v44, 0x200, v44
; VI-NEXT: v_or_b32_e32 v52, v1, v0
@@ -189612,28 +189429,32 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_or_b32_e32 v46, v2, v0
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
; VI-NEXT: v_or_b32_e32 v45, v1, v0
-; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_f16_sdwa v1, v6, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_f16_sdwa v11, v7, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: v_add_f16_e32 v7, 0x200, v7
; VI-NEXT: v_add_f16_e32 v6, 0x200, v6
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v11
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; VI-NEXT: v_or_b32_e32 v5, v7, v0
; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_e32 v4, v6, v0
; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_add_f16_sdwa v16, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_add_f16_sdwa v28, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_add_f16_e32 v2, 0x200, v2
+; VI-NEXT: v_add_f16_e32 v1, 0x200, v1
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_add_f16_sdwa v39, v6, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_f16_sdwa v56, v7, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v7, 0x200, v7
; VI-NEXT: v_add_f16_e32 v6, 0x200, v6
@@ -189641,36 +189462,13 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_or_b32_e32 v41, v7, v0
; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v39
-; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_e32 v40, v6, v0
-; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_add_f16_sdwa v19, v24, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_add_f16_sdwa v42, v25, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: v_add_f16_e32 v25, 0x200, v25
-; VI-NEXT: v_add_f16_e32 v24, 0x200, v24
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v42
; VI-NEXT: v_or_b32_e32 v7, v25, v0
; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_add_f16_sdwa v28, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: v_add_f16_e32 v2, 0x200, v2
-; VI-NEXT: v_add_f16_sdwa v16, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: v_add_f16_e32 v1, 0x200, v1
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v19
-; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_add_f16_sdwa v13, v54, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; VI-NEXT: v_add_f16_e32 v54, 0x200, v54
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v25, 8, v46
; VI-NEXT: v_or_b32_e32 v6, v24, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v3
@@ -189679,7 +189477,6 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_or_b32_e32 v31, v43, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v28
; VI-NEXT: v_or_b32_e32 v30, v2, v0
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_add_f16_sdwa v2, v55, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v55, 0x200, v55
; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
@@ -189695,8 +189492,6 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v13, 8, v1
; VI-NEXT: v_lshrrev_b32_e32 v54, 8, v0
; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[0:1]
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v30
@@ -189714,21 +189509,21 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v7
+; VI-NEXT: v_mov_b32_e32 v32, v10
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v6
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[6:7]
-; VI-NEXT: v_mov_b32_e32 v32, v10
; VI-NEXT: v_mov_b32_e32 v31, v9
; VI-NEXT: v_lshrrev_b32_e32 v10, 8, v41
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v11
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b64 v[10:11], 24, v[40:41]
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v55, v27
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v54, v26
; VI-NEXT: v_mov_b32_e32 v26, v20
; VI-NEXT: v_lshrrev_b32_e32 v20, 8, v5
@@ -189736,23 +189531,14 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_mov_b32_e32 v5, v22
; VI-NEXT: v_mov_b32_e32 v13, v21
; VI-NEXT: v_lshrrev_b64 v[21:22], 24, v[45:46]
-; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v53
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v52
; VI-NEXT: v_lshrrev_b64 v[21:22], 24, v[50:51]
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v50
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v21, 8, v48
; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b64 v[21:22], 24, v[48:49]
-; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v49
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v30, 8, v36
@@ -189760,27 +189546,39 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b64 v[35:36], 24, v[35:36]
; VI-NEXT: v_mov_b32_e32 v36, v2
; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v15
-; VI-NEXT: v_lshrrev_b32_e32 v41, 8, v62
-; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v61
-; VI-NEXT: v_lshrrev_b64 v[61:62], 24, v[61:62]
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v14
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v53
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v52
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v50
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v49
; VI-NEXT: v_mov_b32_e32 v48, v56
; VI-NEXT: v_lshrrev_b32_e32 v56, 8, v33
; VI-NEXT: v_lshrrev_b64 v[49:50], 24, v[33:34]
; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[14:15]
; VI-NEXT: v_lshrrev_b32_e32 v14, 8, v58
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v41, 8, v62
+; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v61
+; VI-NEXT: v_lshrrev_b64 v[61:62], 24, v[61:62]
; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v14, 8, v57
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v9, v23
; VI-NEXT: v_lshrrev_b32_e32 v23, 8, v40
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v14, v8
+; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v40, v42
; VI-NEXT: v_bfe_u32 v8, v42, 8, 8
+; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
; VI-NEXT: v_lshrrev_b32_e32 v46, 8, v38
; VI-NEXT: v_lshrrev_b32_e32 v22, 8, v37
; VI-NEXT: v_lshrrev_b64 v[37:38], 24, v[37:38]
@@ -189797,26 +189595,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_bfe_u32 v51, v48, 8, 8
; VI-NEXT: v_bfe_u32 v57, v7, 8, 8
; VI-NEXT: v_bfe_u32 v58, v60, 8, 8
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_bfe_u32 v34, v62, 8, 8
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_bfe_u32 v2, v2, 8, 8
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; VI-NEXT: v_bfe_u32 v34, v47, 8, 8
; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; VI-NEXT: v_bfe_u32 v9, v9, 8, 8
; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; VI-NEXT: v_bfe_u32 v5, v5, 8, 8
; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; VI-NEXT: v_bfe_u32 v13, v13, 8, 8
-; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(12)
+; VI-NEXT: v_bfe_u32 v2, v2, 8, 8
+; VI-NEXT: s_waitcnt vmcnt(11)
; VI-NEXT: v_bfe_u32 v42, v0, 8, 8
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: v_bfe_u32 v34, v62, 8, 8
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; VI-NEXT: v_bfe_u32 v34, v47, 8, 8
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_bfe_u32 v0, v0, 8, 8
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
@@ -189986,16 +189782,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1
-; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_add_u32_e32 v1, vcc, 56, v12
@@ -190067,14 +189862,13 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x54, v12
; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v56
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v49
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x58, v12
@@ -190082,41 +189876,42 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v63
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v58
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x5c, v12
; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v21
; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v35
; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x60, v12
; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v30
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v47
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x64, v12
; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v22
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v37
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x68, v12
@@ -190124,6 +189919,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v46
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v0, v29, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1
@@ -190161,17 +189957,16 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x74, v12
; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x78, v12
@@ -190179,6 +189974,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v45
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v34
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -190207,22 +190003,6 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX9-LABEL: bitcast_v64f16_to_v128i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
@@ -190285,6 +190065,23 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX9-NEXT: ; implicit-def: $vgpr50
; GFX9-NEXT: ; kill: killed $vgpr50
; GFX9-NEXT: ; implicit-def: $vgpr50
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr44
; GFX9-NEXT: ; kill: killed $vgpr50
; GFX9-NEXT: ; implicit-def: $vgpr50
@@ -190315,7 +190112,6 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX9-NEXT: ; implicit-def: $vgpr52
; GFX9-NEXT: ; implicit-def: $vgpr51
; GFX9-NEXT: ; implicit-def: $vgpr50
-; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
@@ -190349,7 +190145,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(17)
+; GFX9-NEXT: s_waitcnt vmcnt(33)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; GFX9-NEXT: ; implicit-def: $vgpr33
; GFX9-NEXT: ; kill: killed $vgpr33
@@ -190472,101 +190268,100 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v32
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(46)
+; GFX9-NEXT: s_waitcnt vmcnt(62)
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v31
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v31
+; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[15:16]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v30
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v30
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v30
+; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[13:14]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v29
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v29
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v28
+; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[11:12]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v28
+; GFX9-NEXT: v_lshrrev_b64 v[51:52], 24, v[9:10]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v28
+; GFX9-NEXT: v_lshrrev_b64 v[52:53], 24, v[7:8]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[31:32]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v27
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v26
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[29:30]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v26
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v25
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[27:28]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v24
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v24
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[25:26]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v23
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v22
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[23:24]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v22
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v22
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[21:22]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v21
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v20
-; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[15:16]
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v20
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[13:14]
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[11:12]
-; GFX9-NEXT: v_lshrrev_b64 v[51:52], 24, v[9:10]
-; GFX9-NEXT: v_lshrrev_b64 v[52:53], 24, v[7:8]
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[31:32]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[29:30]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[27:28]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[25:26]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[23:24]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[21:22]
; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[5:6]
; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v20
; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[3:4]
; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[19:20]
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 8, v10
@@ -190582,6 +190377,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v58, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 8, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v56, 24, v32
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 8, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v19
@@ -190607,7 +190403,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[33:34], 24, v[13:14]
; GFX9-NEXT: v_pk_add_f16 v32, v32, s6 op_sel_hi:[1,0]
-; GFX9-NEXT: s_waitcnt vmcnt(18)
+; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: v_pk_add_f16 v31, v31, s6 op_sel_hi:[1,0]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
@@ -191633,7 +191429,11 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v64f16_to_v128i8:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x2
+; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:88
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:84
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:80
@@ -191654,10 +191454,6 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:20
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:12
-; GFX11-FAKE16-NEXT: s_clause 0x2
-; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
-; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr74
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr72
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
@@ -192293,7 +192089,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:12
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:20
@@ -194483,8 +194279,6 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v14, 8, v7
; VI-NEXT: v_lshrrev_b64 v[7:8], 24, v[7:8]
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v10
@@ -194492,6 +194286,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v9
; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v13
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v12
; VI-NEXT: v_lshrrev_b64 v[13:14], 24, v[12:13]
@@ -194499,12 +194294,6 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v12, 8, v1
; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[1:2]
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v16
-; VI-NEXT: v_lshrrev_b32_e32 v8, 8, v19
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v4
@@ -194512,14 +194301,20 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v3
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[3:4]
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b64 v[9:10], 24, v[9:10]
+; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v16
; VI-NEXT: v_lshrrev_b64 v[16:17], 24, v[15:16]
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v8, 8, v19
; VI-NEXT: v_lshrrev_b32_e32 v10, 8, v18
; VI-NEXT: v_lshrrev_b64 v[17:18], 24, v[18:19]
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v31
+; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v15
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v8, 8, v35
; VI-NEXT: v_lshrrev_b64 v[18:19], 24, v[34:35]
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
@@ -194554,6 +194349,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_bfe_u32 v11, v52, 8, 8
; VI-NEXT: v_lshrrev_b32_e32 v46, 8, v33
; VI-NEXT: v_lshrrev_b32_e32 v58, 8, v32
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v29
; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v28
@@ -195713,42 +195509,42 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[9:10]
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[11:12]
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[13:14]
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[21:22]
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 24, v4
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v3
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 8, v3
+; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[11:12]
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 24, v6
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v6
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 8, v6
+; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[13:14]
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 8, v5
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 24, v8
+; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[21:22]
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v8
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 8, v8
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v7
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v26
@@ -196715,7 +196511,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_or_saveexec_b32 s4, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:76
; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:80
; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:84
@@ -196750,7 +196546,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_writelane_b32 v76, s101, 5
; GFX11-NEXT: s_mov_b32 s99, 0
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
-; GFX11-NEXT: s_clause 0x12
+; GFX11-NEXT: s_clause 0x12 ; 76-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:72
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:68
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:64
@@ -197669,7 +197465,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_store_b128 v0, v[11:14], off offset:80
; GFX11-NEXT: scratch_store_b128 v0, v[7:10], off offset:96
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
-; GFX11-NEXT: s_clause 0x12
+; GFX11-NEXT: s_clause 0x12 ; 76-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v74, off, s32
; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:8
@@ -197731,7 +197527,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_readlane_b32 s31, v75, 1
; GFX11-NEXT: v_readlane_b32 s30, v75, 0
; GFX11-NEXT: s_or_saveexec_b32 s0, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:76
; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:80
; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:84
@@ -197782,11 +197578,11 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
+; SI-NEXT: v_mov_b32_e32 v54, v15
; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v54, v15
; SI-NEXT: v_mov_b32_e32 v57, v5
; SI-NEXT: v_mov_b32_e32 v41, v3
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:392
@@ -197876,7 +197672,30 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v15
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v27
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v29
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v31
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160
+; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v32
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v33
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v34
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:196
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:220
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:192
; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v11
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -197884,28 +197703,21 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v21
-; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v27
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:96
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v17
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v29
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v23
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v31
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v32
; SI-NEXT: v_lshlrev_b32_e32 v31, 8, v2
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v18
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v26
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v34, 8, v10
+; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr23
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr27
@@ -197913,240 +197725,211 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160
+; SI-NEXT: ; implicit-def: $vgpr10
+; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:88
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:184
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:88
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:80
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:216
; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:112
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:140
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:180
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:176
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v33
-; SI-NEXT: ; implicit-def: $vgpr33
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v34
-; SI-NEXT: v_lshlrev_b32_e32 v34, 8, v10
-; SI-NEXT: ; implicit-def: $vgpr10
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4
; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:172
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:196
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:220
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:192
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:216
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:212
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:208
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:204
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:228
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:252
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:224
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:172
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:204
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:248
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:244
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152
; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v9
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:236
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:260
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:284
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:256
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:80
+; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:244
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:276
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:272
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v11
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:268
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:292
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:316
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:236
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:312
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:308
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:304
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:276
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:272
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v9
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v11
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:300
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:324
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:348
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:320
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:308
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:304
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:268
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v4
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v63, 8, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:344
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:340
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:336
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v60, 24, v9
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:332
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v11
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:356
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:380
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:352
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:300
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v4
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:376
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:340
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:336
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v60, 24, v9
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:388
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:384
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:372
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:368
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:332
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_lshlrev_b32_e32 v32, 24, v8
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:8
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:364
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:388
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:384
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v26
-; SI-NEXT: ; implicit-def: $vgpr26
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_lshlrev_b32_e32 v32, 24, v8
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v11
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:8
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:120
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:40
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:140
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76
@@ -198158,15 +197941,19 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:56
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v35
; SI-NEXT: ; implicit-def: $vgpr35
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:56
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v3
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
@@ -198202,7 +197989,7 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:48
@@ -198682,15 +198469,15 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; SI-NEXT: v_or_b32_e32 v25, v6, v13
; SI-NEXT: v_and_b32_e32 v6, 0xffff, v7
; SI-NEXT: v_or_b32_e32 v6, v6, v5
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
; SI-NEXT: v_alignbit_b32 v7, v25, v5, 16
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_and_b32_e32 v6, 0xffff, v26
; SI-NEXT: v_or_b32_e32 v6, v6, v11
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5
; SI-NEXT: v_or_b32_e32 v5, v5, v8
; SI-NEXT: s_waitcnt expcnt(0)
@@ -200009,8 +199796,8 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v25
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v29
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v55, 8, v3
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b16_e32 v40, 8, v5
; VI-NEXT: v_lshlrev_b16_e32 v41, 8, v7
; VI-NEXT: v_lshlrev_b16_e32 v50, 8, v9
@@ -200106,13 +199893,25 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:52
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -200240,14 +200039,19 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v38, 8, v0
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_lshlrev_b16_e32 v39, 8, v1
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_lshlrev_b16_e32 v49, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v51, 8, v3
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:356
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_lshlrev_b16_e32 v51, 8, v3
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b16_e32 v53, 8, v0
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -200255,26 +200059,6 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_lshlrev_b16_e32 v53, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:52
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -200283,35 +200067,57 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_or_b32_sdwa v0, v0, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: s_waitcnt vmcnt(12)
; VI-NEXT: v_or_b32_sdwa v1, v1, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(11)
; VI-NEXT: v_or_b32_sdwa v3, v3, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v2, v2, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_or_b32_sdwa v2, v2, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_or_b32_sdwa v4, v4, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_or_b32_sdwa v10, v61, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v5, v5, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v11, v57, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_or_b32_sdwa v6, v6, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v12, v59, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v13, v47, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v14, v45, v14 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v15, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: ; implicit-def: $vgpr61
+; VI-NEXT: ; implicit-def: $vgpr57
+; VI-NEXT: ; implicit-def: $vgpr59
+; VI-NEXT: ; implicit-def: $vgpr47
+; VI-NEXT: ; implicit-def: $vgpr45
+; VI-NEXT: ; implicit-def: $vgpr43
; VI-NEXT: ; implicit-def: $vgpr54
; VI-NEXT: ; implicit-def: $vgpr55
; VI-NEXT: ; implicit-def: $vgpr40
; VI-NEXT: ; implicit-def: $vgpr41
; VI-NEXT: ; implicit-def: $vgpr48
; VI-NEXT: ; implicit-def: $vgpr36
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v6, v6, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: ; implicit-def: $vgpr34
+; VI-NEXT: v_or_b32_sdwa v31, v31, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: ; implicit-def: $vgpr49
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v2, v2, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -200344,39 +200150,19 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v6, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: ; implicit-def: $vgpr53
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_or_b32_sdwa v10, v61, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_or_b32_sdwa v11, v57, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_or_b32_sdwa v12, v59, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_or_b32_sdwa v13, v47, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v14, v45, v14 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr61
-; VI-NEXT: ; implicit-def: $vgpr57
-; VI-NEXT: ; implicit-def: $vgpr59
-; VI-NEXT: ; implicit-def: $vgpr47
-; VI-NEXT: ; implicit-def: $vgpr45
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v15, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr43
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v8, v62, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -200542,17 +200328,9 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v28, v28, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v29, v29, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v31, v31, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr49
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: ; implicit-def: $vgpr53
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v30, v30, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v29, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -201237,8 +201015,8 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v27
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v29
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v54, 8, v3
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b16_e32 v41, 8, v5
; GFX9-NEXT: v_lshlrev_b16_e32 v40, 8, v7
; GFX9-NEXT: v_lshlrev_b16_e32 v51, 8, v9
@@ -201349,13 +201127,27 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:208
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:188
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v1
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(16)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:196
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v2
@@ -201488,14 +201280,19 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v37, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_lshlrev_b16_e32 v49, 8, v1
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_lshlrev_b16_e32 v48, 8, v2
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v52, 8, v3
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:356
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v52, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b16_e32 v53, 8, v0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:364
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -201503,26 +201300,6 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:372
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:384
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:380
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v53, 8, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:52
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -201531,36 +201308,62 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; GFX9-NEXT: s_mov_b32 s6, 0x5040100
+; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v55 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
+; GFX9-NEXT: s_waitcnt vmcnt(14)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_or_b32_sdwa v2, v2, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v3, v3, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v0, v1, v0, s6
+; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: v_or_b32_sdwa v3, v3, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v1, v3, v2, s6
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(10)
+; GFX9-NEXT: v_or_b32_sdwa v10, v60, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v11, v56, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v4, v4, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_or_b32_sdwa v12, v58, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v5, v5, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v13, v46, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v6, v6, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v14, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v31, v31, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: ; implicit-def: $vgpr60
+; GFX9-NEXT: ; implicit-def: $vgpr56
+; GFX9-NEXT: ; implicit-def: $vgpr58
+; GFX9-NEXT: ; implicit-def: $vgpr46
+; GFX9-NEXT: ; implicit-def: $vgpr44
; GFX9-NEXT: ; implicit-def: $vgpr55
; GFX9-NEXT: ; implicit-def: $vgpr54
; GFX9-NEXT: ; implicit-def: $vgpr41
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr38
; GFX9-NEXT: ; implicit-def: $vgpr35
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v6, v6, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: ; implicit-def: $vgpr33
+; GFX9-NEXT: ; implicit-def: $vgpr48
+; GFX9-NEXT: v_or_b32_sdwa v15, v42, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: ; implicit-def: $vgpr42
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v2, v2, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -201583,49 +201386,25 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_sdwa v5, v5, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v5, v6, v5, s6
; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
; GFX9-NEXT: ; implicit-def: $vgpr34
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v6, v6, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v6, v6, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v6, v7, v6, s6
; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: ; implicit-def: $vgpr53
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v7, v8, v7, s6
; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_or_b32_sdwa v10, v60, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_or_b32_sdwa v11, v56, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_or_b32_sdwa v12, v58, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v13, v46, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v14, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: ; implicit-def: $vgpr60
-; GFX9-NEXT: ; implicit-def: $vgpr56
-; GFX9-NEXT: ; implicit-def: $vgpr58
-; GFX9-NEXT: ; implicit-def: $vgpr46
-; GFX9-NEXT: ; implicit-def: $vgpr44
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v15, v42, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: ; implicit-def: $vgpr42
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v8, v63, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -201791,17 +201570,9 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v28, v29, v28, s6
; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v29, v29, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v31, v31, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v32, v32, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: ; implicit-def: $vgpr48
-; GFX9-NEXT: ; implicit-def: $vgpr53
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v30, v30, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_perm_b32 v29, v30, v29, s6
@@ -203147,7 +202918,7 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64i16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:580
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:576
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:572
@@ -203180,7 +202951,7 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v93, s32 offset:464
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v94, s32 offset:460
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v95, s32 offset:456
-; GFX11-FAKE16-NEXT: s_clause 0xf
+; GFX11-FAKE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v104, s32 offset:452
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v105, s32 offset:448
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v106, s32 offset:444
@@ -204009,7 +203780,7 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: v_perm_b32 v31, v116, v31, 0x5040100
; GFX11-FAKE16-NEXT: .LBB96_4: ; %end
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_clause 0x1f
+; GFX11-FAKE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v127, off, s32 offset:392
; GFX11-FAKE16-NEXT: scratch_load_b32 v126, off, s32 offset:396
; GFX11-FAKE16-NEXT: scratch_load_b32 v125, off, s32 offset:400
@@ -204042,7 +203813,7 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:508
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:512
; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:516
-; GFX11-FAKE16-NEXT: s_clause 0xf
+; GFX11-FAKE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:520
; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:524
; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:528
@@ -204087,7 +203858,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[4:5]
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:332
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:328
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324
@@ -204097,9 +203867,9 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:308
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:304
; SI-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
-; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_writelane_b32 v41, s30, 0
-; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v43, s29, 0
; SI-NEXT: v_writelane_b32 v43, s28, 1
; SI-NEXT: v_writelane_b32 v43, s27, 2
@@ -204148,6 +203918,12 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_writelane_b32 v41, s96, 32
; SI-NEXT: v_writelane_b32 v41, s97, 33
; SI-NEXT: v_writelane_b32 v41, s98, 34
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:164
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:160
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:156
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:152
; SI-NEXT: v_readfirstlane_b32 s39, v26
; SI-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
; SI-NEXT: v_readfirstlane_b32 s47, v12
@@ -204170,9 +203946,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s59, v28
; SI-NEXT: v_readfirstlane_b32 s60, v27
; SI-NEXT: v_readfirstlane_b32 s11, v1
-; SI-NEXT: v_readfirstlane_b32 s12, v2
-; SI-NEXT: v_readfirstlane_b32 s13, v9
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: v_writelane_b32 v43, s4, 14
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:300
@@ -204181,30 +203955,28 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:288
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:284
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:280
-; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_readfirstlane_b32 s4, v32
; SI-NEXT: v_writelane_b32 v43, s4, 15
-; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_readfirstlane_b32 s4, v33
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:276
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:272
; SI-NEXT: v_writelane_b32 v43, s4, 16
-; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s4, v34
; SI-NEXT: v_writelane_b32 v43, s4, 17
-; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_readfirstlane_b32 s4, v35
; SI-NEXT: v_writelane_b32 v43, s4, 18
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_readfirstlane_b32 s44, v36
-; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_readfirstlane_b32 s90, v37
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:268
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:264
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:260
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:256
-; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_readfirstlane_b32 s6, v38
+; SI-NEXT: v_readfirstlane_b32 s12, v2
+; SI-NEXT: v_readfirstlane_b32 s13, v9
; SI-NEXT: v_readfirstlane_b32 s14, v10
; SI-NEXT: v_readfirstlane_b32 s15, v8
; SI-NEXT: v_readfirstlane_b32 s18, v7
@@ -204218,6 +203990,10 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s77, v15
; SI-NEXT: v_readfirstlane_b32 s38, v25
; SI-NEXT: v_writelane_b32 v41, s99, 35
+; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: v_readfirstlane_b32 s93, v55
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_readfirstlane_b32 s95, v40
; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: v_writelane_b32 v43, s4, 19
@@ -204294,39 +204070,35 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_writelane_b32 v43, s4, 30
; SI-NEXT: v_readfirstlane_b32 s4, v32
; SI-NEXT: v_writelane_b32 v43, s4, 31
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:164
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:160
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:156
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:152
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s4, v34
; SI-NEXT: v_writelane_b32 v43, s4, 32
+; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_readfirstlane_b32 s9, v35
-; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_readfirstlane_b32 s4, v37
; SI-NEXT: v_writelane_b32 v43, s4, 33
; SI-NEXT: v_readfirstlane_b32 s10, v36
-; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: v_writelane_b32 v43, s4, 34
-; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_readfirstlane_b32 s4, v38
; SI-NEXT: v_writelane_b32 v43, s4, 35
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_readfirstlane_b32 s4, v39
; SI-NEXT: v_writelane_b32 v43, s4, 36
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_readfirstlane_b32 s69, v48
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_readfirstlane_b32 s30, v49
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_readfirstlane_b32 s16, v50
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_readfirstlane_b32 s36, v51
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:148
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:144
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_readfirstlane_b32 s4, v33
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:136
@@ -204340,7 +204112,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:104
; SI-NEXT: v_writelane_b32 v43, s4, 37
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_readfirstlane_b32 s4, v52
; SI-NEXT: v_writelane_b32 v43, s4, 38
; SI-NEXT: v_readfirstlane_b32 s4, v53
@@ -204367,9 +204139,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_writelane_b32 v43, s43, 58
; SI-NEXT: v_writelane_b32 v43, s76, 59
; SI-NEXT: v_writelane_b32 v43, s77, 60
-; SI-NEXT: v_readfirstlane_b32 s93, v55
-; SI-NEXT: s_waitcnt vmcnt(13)
-; SI-NEXT: v_readfirstlane_b32 s95, v40
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_readfirstlane_b32 s17, v33
; SI-NEXT: s_waitcnt vmcnt(9)
@@ -205938,33 +205707,53 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v24
; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v26
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:180
+; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:188
+; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:292
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:324
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v16
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v18
; VI-NEXT: v_lshlrev_b32_e32 v20, 8, v20
; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v22
-; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v36, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
@@ -206009,52 +205798,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:68
; VI-NEXT: s_waitcnt vmcnt(10)
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:76
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:84
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:92
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:100
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:108
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:116
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:172
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260
-; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:276
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:308
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:324
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
@@ -206074,6 +205817,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
@@ -206082,7 +205826,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
@@ -206114,6 +205857,25 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:76
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:84
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:92
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:100
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:108
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:116
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB97_2
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
@@ -206138,15 +205900,18 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_or_b32_sdwa v2, v2, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v2, v8
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
@@ -206196,10 +205961,11 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -206207,50 +205973,37 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v1, v48, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v49, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v1, v60, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v2, v35, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(9)
-; VI-NEXT: v_or_b32_sdwa v3, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_or_b32_sdwa v0, v42, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v1, v41, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v39, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v42, v43
; VI-NEXT: v_mov_b32_e32 v43, v37
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
@@ -206265,13 +206018,12 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v1, v24, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v34, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
@@ -206293,21 +206045,28 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v31, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v54, v33
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v56, v1
; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v2, v35, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v3, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v63, v39
+; VI-NEXT: v_mov_b32_e32 v54, v33
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_mov_b32_e32 v57, v0
; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -206325,11 +206084,10 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v53, v35
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -206362,7 +206120,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_branch .LBB97_3
; VI-NEXT: .LBB97_2:
; VI-NEXT: v_mov_b32_e32 v47, v54
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
@@ -206383,6 +206140,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v58, v7
; VI-NEXT: v_mov_b32_e32 v57, v5
; VI-NEXT: v_mov_b32_e32 v56, v3
@@ -206974,29 +206732,51 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
+; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:292
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:324
; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v46
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: s_waitcnt vmcnt(29)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
@@ -207060,82 +206840,42 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:124
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:132
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:140
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:148
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:164
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(22)
; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(24)
; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(31)
; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(35)
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(35)
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
@@ -207156,6 +206896,13 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(55)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:140
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:148
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB97_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
@@ -207409,14 +207156,13 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_lshl_or_b32 v30, v1, 16, v0
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v31, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
@@ -207426,7 +207172,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: .LBB97_2:
; GFX9-NEXT: v_mov_b32_e32 v58, v50
; GFX9-NEXT: v_mov_b32_e32 v45, v59
-; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
@@ -207438,6 +207183,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v34, v35
+; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v49, v39
@@ -207903,7 +207649,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-LABEL: bitcast_v128i8_to_v64i16_scalar:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0x1e
+; GFX11-TRUE16-NEXT: s_clause 0x1e ; 124-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:440
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:436
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:432
@@ -208633,7 +208379,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_mov_b16_e64 v30.h, v182.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v43.l
; GFX11-TRUE16-NEXT: .LBB97_3: ; %end
-; GFX11-TRUE16-NEXT: s_clause 0x1e
+; GFX11-TRUE16-NEXT: s_clause 0x1e ; 124-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:324
; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:328
@@ -208675,7 +208421,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64i16_scalar:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x1e
+; GFX11-FAKE16-NEXT: s_clause 0x1e ; 124-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:440
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:436
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:432
@@ -209459,7 +209205,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v30, v30, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v31, 16, v36
; GFX11-FAKE16-NEXT: .LBB97_3: ; %end
-; GFX11-FAKE16-NEXT: s_clause 0x1e
+; GFX11-FAKE16-NEXT: s_clause 0x1e ; 124-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v94, off, s32 offset:320
; GFX11-FAKE16-NEXT: scratch_load_b32 v93, off, s32 offset:324
; GFX11-FAKE16-NEXT: scratch_load_b32 v92, off, s32 offset:328
@@ -209562,100 +209308,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:132
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:128
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v28
-; SI-NEXT: ; kill: killed $vgpr23
-; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: ; implicit-def: $vgpr46
-; SI-NEXT: ; implicit-def: $vgpr45
-; SI-NEXT: ; implicit-def: $vgpr44
-; SI-NEXT: ; implicit-def: $vgpr43
-; SI-NEXT: ; implicit-def: $vgpr42
-; SI-NEXT: ; implicit-def: $vgpr41
-; SI-NEXT: ; implicit-def: $vgpr40
-; SI-NEXT: ; implicit-def: $vgpr55
-; SI-NEXT: ; implicit-def: $vgpr54
-; SI-NEXT: ; implicit-def: $vgpr53
-; SI-NEXT: ; implicit-def: $vgpr52
-; SI-NEXT: ; implicit-def: $vgpr51
-; SI-NEXT: ; implicit-def: $vgpr50
-; SI-NEXT: ; implicit-def: $vgpr49
-; SI-NEXT: ; implicit-def: $vgpr48
-; SI-NEXT: ; implicit-def: $vgpr39
-; SI-NEXT: ; implicit-def: $vgpr38
-; SI-NEXT: ; implicit-def: $vgpr37
-; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: ; implicit-def: $vgpr35
-; SI-NEXT: ; implicit-def: $vgpr34
-; SI-NEXT: ; implicit-def: $vgpr33
-; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: ; implicit-def: $vgpr32
-; SI-NEXT: ; kill: killed $vgpr23
-; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v13
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v19
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:120
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:116
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:112
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:104
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:88
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:84
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:80
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v56
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:72
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:56
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:52
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:48
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v9
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v6
@@ -209785,14 +209437,29 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v9
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:120
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_waitcnt vmcnt(13)
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; kill: killed $vgpr2
@@ -209809,13 +209476,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; kill: killed $vgpr2
@@ -209870,12 +209530,39 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v11
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v57
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v58
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v62
+; SI-NEXT: ; implicit-def: $vgpr23
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v13
+; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v19
+; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v28
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
+; SI-NEXT: ; kill: killed $vgpr23
+; SI-NEXT: ; implicit-def: $vgpr23
+; SI-NEXT: ; implicit-def: $vgpr46
+; SI-NEXT: ; implicit-def: $vgpr45
+; SI-NEXT: ; implicit-def: $vgpr44
+; SI-NEXT: ; implicit-def: $vgpr43
+; SI-NEXT: ; implicit-def: $vgpr42
+; SI-NEXT: ; implicit-def: $vgpr41
+; SI-NEXT: ; implicit-def: $vgpr40
+; SI-NEXT: ; implicit-def: $vgpr55
+; SI-NEXT: ; implicit-def: $vgpr54
+; SI-NEXT: ; implicit-def: $vgpr53
+; SI-NEXT: ; implicit-def: $vgpr52
+; SI-NEXT: ; implicit-def: $vgpr51
+; SI-NEXT: ; implicit-def: $vgpr50
+; SI-NEXT: ; implicit-def: $vgpr49
+; SI-NEXT: ; implicit-def: $vgpr48
+; SI-NEXT: ; implicit-def: $vgpr39
+; SI-NEXT: ; implicit-def: $vgpr38
+; SI-NEXT: ; implicit-def: $vgpr37
+; SI-NEXT: ; implicit-def: $vgpr36
+; SI-NEXT: ; implicit-def: $vgpr35
+; SI-NEXT: ; implicit-def: $vgpr34
+; SI-NEXT: ; implicit-def: $vgpr33
+; SI-NEXT: ; implicit-def: $vgpr31
+; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr18
@@ -209885,36 +209572,81 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr2
+; SI-NEXT: ; kill: killed $vgpr23
+; SI-NEXT: ; implicit-def: $vgpr23
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:116
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:112
; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16
+; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v62
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:104
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v60
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v59
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v63
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:88
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:100
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:96
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:84
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:80
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v61
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:64
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v56
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:72
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:32
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:56
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:48
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4
@@ -209936,6 +209668,18 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v47
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:40
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v58
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v11
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v57
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB98_2
@@ -211555,22 +211299,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-LABEL: bitcast_v64i16_to_v128i8:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32
@@ -211588,6 +211316,22 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v9
; VI-NEXT: ; kill: killed $vgpr35
; VI-NEXT: ; implicit-def: $vgpr35
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v7
; VI-NEXT: ; kill: killed $vgpr35
@@ -211884,14 +211628,12 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v9, v8
; VI-NEXT: v_lshrrev_b64 v[7:8], 24, v[7:8]
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v7, v6
; VI-NEXT: v_lshrrev_b64 v[5:6], 24, v[5:6]
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v5, 24, v4
@@ -211923,10 +211665,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[36:37]
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v5, v4
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v1, 24, v30
@@ -211997,10 +211735,16 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[19:20]
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v1, v18
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v5, v4
; VI-NEXT: v_lshrrev_b64 v[41:42], 24, v[21:22]
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v1, v46
; VI-NEXT: v_lshrrev_b64 v[45:46], 24, v[17:18]
+; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v35, 24, v26
; VI-NEXT: v_lshrrev_b32_e32 v39, 24, v24
; VI-NEXT: v_lshrrev_b32_e32 v58, 24, v22
@@ -212201,9 +211945,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v15
; VI-NEXT: v_lshrrev_b64 v[15:16], 24, v[15:16]
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: v_or_b32_e32 v13, v41, v13
@@ -212211,38 +211952,35 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v15, 8, v13
; VI-NEXT: v_lshrrev_b64 v[13:14], 24, v[13:14]
-; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v13, 8, v12
; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v13, 8, v11
; VI-NEXT: v_lshrrev_b64 v[11:12], 24, v[11:12]
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v11, 8, v10
; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v11, 8, v9
; VI-NEXT: v_lshrrev_b64 v[9:10], 24, v[9:10]
-; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v9, 8, v8
; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v9, 8, v7
; VI-NEXT: v_lshrrev_b64 v[7:8], 24, v[7:8]
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v6
; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v5
; VI-NEXT: v_lshrrev_b64 v[5:6], 24, v[5:6]
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v4
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v3
; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4]
@@ -212255,8 +211993,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v36
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[36:37]
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v30
@@ -212325,6 +212061,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-NEXT: v_mov_b32_e32 v49, v53
; VI-NEXT: v_mov_b32_e32 v53, v38
; VI-NEXT: v_mov_b32_e32 v38, v55
+; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v18
; VI-NEXT: v_lshrrev_b32_e32 v42, 8, v17
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
@@ -212336,6 +212073,13 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; VI-NEXT: v_mov_b32_e32 v55, v31
; VI-NEXT: v_bfe_u32 v61, v53, 8, 8
; VI-NEXT: v_bfe_u32 v31, v38, 8, 8
+; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; VI-NEXT: .LBB98_4: ; %end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
@@ -212790,22 +212534,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX9-LABEL: bitcast_v64i16_to_v128i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
@@ -212868,6 +212596,23 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX9-NEXT: ; implicit-def: $vgpr50
; GFX9-NEXT: ; kill: killed $vgpr50
; GFX9-NEXT: ; implicit-def: $vgpr50
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr44
; GFX9-NEXT: ; kill: killed $vgpr50
; GFX9-NEXT: ; implicit-def: $vgpr50
@@ -212898,7 +212643,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX9-NEXT: ; implicit-def: $vgpr52
; GFX9-NEXT: ; implicit-def: $vgpr51
; GFX9-NEXT: ; implicit-def: $vgpr50
-; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
@@ -212932,7 +212676,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(17)
+; GFX9-NEXT: s_waitcnt vmcnt(33)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; GFX9-NEXT: ; implicit-def: $vgpr33
; GFX9-NEXT: ; kill: killed $vgpr33
@@ -213055,101 +212799,100 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v32
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(46)
+; GFX9-NEXT: s_waitcnt vmcnt(62)
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v31
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v31
+; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[15:16]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v30
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v30
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v30
+; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[13:14]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v29
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v29
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v28
+; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[11:12]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v28
+; GFX9-NEXT: v_lshrrev_b64 v[51:52], 24, v[9:10]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v28
+; GFX9-NEXT: v_lshrrev_b64 v[52:53], 24, v[7:8]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v27
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[31:32]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v27
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v26
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[29:30]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v26
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v25
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[27:28]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v24
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v24
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[25:26]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v23
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v22
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[23:24]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v22
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v22
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v21
+; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[21:22]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v21
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v20
-; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[15:16]
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v20
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[13:14]
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[50:51], 24, v[11:12]
-; GFX9-NEXT: v_lshrrev_b64 v[51:52], 24, v[9:10]
-; GFX9-NEXT: v_lshrrev_b64 v[52:53], 24, v[7:8]
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[31:32]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[29:30]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[27:28]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[25:26]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[23:24]
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[21:22]
; GFX9-NEXT: v_lshrrev_b64 v[40:41], 24, v[5:6]
; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v20
; GFX9-NEXT: v_lshrrev_b64 v[41:42], 24, v[3:4]
; GFX9-NEXT: v_lshrrev_b64 v[53:54], 24, v[19:20]
; GFX9-NEXT: v_lshrrev_b32_e32 v59, 8, v10
@@ -213165,6 +212908,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v58, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 8, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v56, 24, v32
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 8, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v19
@@ -213189,7 +212933,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[33:34], 24, v[13:14]
; GFX9-NEXT: v_pk_add_u16 v32, v32, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: s_waitcnt vmcnt(18)
+; GFX9-NEXT: s_waitcnt vmcnt(34)
; GFX9-NEXT: v_pk_add_u16 v31, v31, 3 op_sel_hi:[1,0]
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
@@ -214215,7 +213959,11 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-FAKE16-LABEL: bitcast_v64i16_to_v128i8:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x2
+; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
+; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Spill
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v40, s32 offset:88
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v41, s32 offset:84
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v42, s32 offset:80
@@ -214236,10 +213984,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v73, s32 offset:20
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v74, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_store_b32 off, v75, s32 offset:12
-; GFX11-FAKE16-NEXT: s_clause 0x2
-; GFX11-FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:8
-; GFX11-FAKE16-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX11-FAKE16-NEXT: scratch_load_b32 v31, off, s32
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr74
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr72
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr64
@@ -214875,7 +214619,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96
; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112
-; GFX11-FAKE16-NEXT: s_clause 0x13
+; GFX11-FAKE16-NEXT: s_clause 0x13 ; 80-byte Folded Reload
; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:12
; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:16
; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:20
@@ -215014,26 +214758,26 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s91, v32
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_readfirstlane_b32 s93, v33
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
-; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_readfirstlane_b32 s55, v34
-; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_readfirstlane_b32 s17, v35
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_readfirstlane_b32 s95, v36
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_readfirstlane_b32 s35, v37
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s83, v38
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:80
@@ -215046,39 +214790,34 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s39, v1
; SI-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
; SI-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
-; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_readfirstlane_b32 s77, v31
-; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_readfirstlane_b32 s38, v32
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_readfirstlane_b32 s48, v33
-; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_readfirstlane_b32 s50, v39
; SI-NEXT: s_waitcnt vmcnt(8)
-; SI-NEXT: v_readfirstlane_b32 s76, v48
+; SI-NEXT: v_readfirstlane_b32 s77, v31
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_readfirstlane_b32 s30, v49
+; SI-NEXT: v_readfirstlane_b32 s38, v32
; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_readfirstlane_b32 s34, v50
+; SI-NEXT: v_readfirstlane_b32 s48, v33
; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_readfirstlane_b32 s36, v51
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_readfirstlane_b32 s99, v34
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_readfirstlane_b32 s50, v39
; SI-NEXT: v_readfirstlane_b32 s90, v35
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_readfirstlane_b32 s92, v36
; SI-NEXT: v_writelane_b32 v41, s90, 11
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_readfirstlane_b32 s94, v37
; SI-NEXT: v_writelane_b32 v41, s92, 12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_readfirstlane_b32 s30, v49
; SI-NEXT: v_writelane_b32 v41, s94, 13
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_readfirstlane_b32 s34, v50
; SI-NEXT: v_writelane_b32 v41, s30, 14
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_readfirstlane_b32 s36, v51
; SI-NEXT: v_writelane_b32 v41, s34, 15
; SI-NEXT: v_writelane_b32 v41, s36, 16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v38
; SI-NEXT: v_writelane_b32 v41, s38, 17
+; SI-NEXT: v_readfirstlane_b32 s76, v48
+; SI-NEXT: v_readfirstlane_b32 s99, v34
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_writelane_b32 v41, s48, 18
; SI-NEXT: v_writelane_b32 v41, s50, 19
@@ -218060,48 +217799,48 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[9:10]
+; GFX9-NEXT: v_lshrrev_b32_e32 v19, 24, v4
+; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v3
; GFX9-NEXT: v_pk_add_u16 v12, s41, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, s40, 3 op_sel_hi:[1,0]
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[11:12]
-; GFX9-NEXT: v_pk_add_u16 v14, s43, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v13, s42, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[13:14]
-; GFX9-NEXT: v_pk_add_u16 v22, s45, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v21, s44, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[21:22]
-; GFX9-NEXT: v_lshrrev_b32_e32 v19, 24, v4
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v3
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 8, v3
+; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[11:12]
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 24, v6
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v6
+; GFX9-NEXT: v_pk_add_u16 v14, s43, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v13, s42, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 8, v6
+; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[13:14]
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 8, v5
+; GFX9-NEXT: v_pk_add_u16 v22, s45, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v21, s44, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 24, v8
+; GFX9-NEXT: v_lshrrev_b64 v[15:16], 24, v[21:22]
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v8
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 8, v8
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v7
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v26
@@ -219068,7 +218807,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_or_saveexec_b32 s4, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:76
; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:80
; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:84
@@ -219103,7 +218842,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v76, s101, 5
; GFX11-NEXT: s_mov_b32 s99, 0
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
-; GFX11-NEXT: s_clause 0x12
+; GFX11-NEXT: s_clause 0x12 ; 76-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:72
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:68
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:64
@@ -220022,7 +219761,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_store_b128 v0, v[11:14], off offset:80
; GFX11-NEXT: scratch_store_b128 v0, v[7:10], off offset:96
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
-; GFX11-NEXT: s_clause 0x12
+; GFX11-NEXT: s_clause 0x12 ; 76-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v74, off, s32
; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:4
; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:8
@@ -220084,7 +219823,7 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_readlane_b32 s31, v75, 1
; GFX11-NEXT: v_readlane_b32 s30, v75, 0
; GFX11-NEXT: s_or_saveexec_b32 s0, -1
-; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: s_clause 0x3 ; 16-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:76
; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:80
; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:84
@@ -221471,6 +221210,8 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
; VI-LABEL: bitcast_v64bf16_to_v64f16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -221487,9 +221228,7 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -221738,7 +221477,6 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
; VI-NEXT: v_or_b32_e32 v40, 0x400000, v30
; VI-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
; VI-NEXT: v_cndmask_b32_e32 v30, v55, v40, vcc
-; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshlrev_b32_e32 v55, 16, v31
; VI-NEXT: v_add_f32_e32 v55, 0x40c00000, v55
; VI-NEXT: v_bfe_u32 v40, v55, 16, 1
@@ -222104,6 +221842,9 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
; GFX9-LABEL: bitcast_v64bf16_to_v64f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -222120,9 +221861,7 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(17)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -222341,7 +222080,7 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_e32 v40, 0x400000, v30
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
; GFX9-NEXT: v_cndmask_b32_e32 v30, v55, v40, vcc
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(17)
; GFX9-NEXT: v_lshlrev_b32_e32 v55, 16, v31
; GFX9-NEXT: v_add_f32_e32 v55, 0x40c00000, v55
; GFX9-NEXT: v_bfe_u32 v40, v55, 16, 1
@@ -222641,7 +222380,7 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-LABEL: bitcast_v64bf16_to_v64f16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:60
@@ -223201,7 +222940,7 @@ define <64 x half> @bitcast_v64bf16_to_v64f16(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, v42 :: v_dual_mov_b32 v11, v43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, v44 :: v_dual_mov_b32 v13, v45
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, v46 :: v_dual_mov_b32 v15, v47
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:8
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:12
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:16
@@ -231398,17 +231137,32 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v13
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; SI-NEXT: v_alignbit_b32 v5, v23, v5, 16
; SI-NEXT: v_alignbit_b32 v2, v21, v2, 16
+; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_alignbit_b32 v2, v20, v6, 16
; SI-NEXT: v_alignbit_b32 v1, v61, v1, 16
+; SI-NEXT: v_alignbit_b32 v5, v23, v5, 16
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_alignbit_b32 v2, v19, v3, 16
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_alignbit_b32 v1, v18, v4, 16
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
@@ -231418,57 +231172,63 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; SI-NEXT: v_alignbit_b32 v7, v24, v7, 16
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v13
; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17
; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v17
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v10
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
; SI-NEXT: v_alignbit_b32 v25, v45, v8, 16
; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v9
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; SI-NEXT: v_alignbit_b32 v8, v25, v8, 16
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v13
; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
; SI-NEXT: v_alignbit_b32 v62, v63, v16, 16
; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v33
; SI-NEXT: v_alignbit_b32 v16, v62, v16, 16
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v10
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
; SI-NEXT: v_alignbit_b32 v22, v34, v9, 16
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v11
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; SI-NEXT: v_alignbit_b32 v9, v22, v9, 16
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v11
@@ -231526,31 +231286,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; SI-NEXT: v_alignbit_b32 v15, v41, v15, 16
; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_alignbit_b32 v2, v20, v6, 16
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_alignbit_b32 v2, v19, v3, 16
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_alignbit_b32 v1, v18, v4, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: .LBB104_4: ; %end
; SI-NEXT: s_or_b64 exec, exec, s[4:5]
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -231855,6 +231592,8 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; VI-LABEL: bitcast_v64bf16_to_v64i16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -231871,9 +231610,7 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -232122,7 +231859,6 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; VI-NEXT: v_or_b32_e32 v40, 0x400000, v30
; VI-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
; VI-NEXT: v_cndmask_b32_e32 v30, v55, v40, vcc
-; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshlrev_b32_e32 v55, 16, v31
; VI-NEXT: v_add_f32_e32 v55, 0x40c00000, v55
; VI-NEXT: v_bfe_u32 v40, v55, 16, 1
@@ -232488,6 +232224,9 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; GFX9-LABEL: bitcast_v64bf16_to_v64i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -232504,9 +232243,7 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(17)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -232725,7 +232462,7 @@ define <64 x i16> @bitcast_v64bf16_to_v64i16(<64 x bfloat> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_e32 v40, 0x400000, v30
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
; GFX9-NEXT: v_cndmask_b32_e32 v30, v55, v40, vcc
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: s_waitcnt vmcnt(17)
; GFX9-NEXT: v_lshlrev_b32_e32 v55, 16, v31
; GFX9-NEXT: v_add_f32_e32 v55, 0x40c00000, v55
; GFX9-NEXT: v_bfe_u32 v40, v55, 16, 1
@@ -234330,15 +234067,21 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: v_mov_b32_e32 v57, v13
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v34
-; SI-NEXT: v_mov_b32_e32 v57, v13
; SI-NEXT: v_mov_b32_e32 v40, v3
; SI-NEXT: v_mov_b32_e32 v54, v50
; SI-NEXT: v_mov_b32_e32 v46, v19
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v9
; SI-NEXT: v_mov_b32_e32 v44, v15
; SI-NEXT: v_mov_b32_e32 v9, v11
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v6
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v59
@@ -234372,32 +234115,24 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mov_b32_e32 v42, v43
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0) expcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(7) expcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v13
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(6) expcnt(1)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v3
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v11
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v15
; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v50
-; SI-NEXT: s_waitcnt vmcnt(2) expcnt(1)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19
+; SI-NEXT: v_mov_b32_e32 v5, v19
+; SI-NEXT: v_mov_b32_e32 v7, v15
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v17
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v11
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v15
-; SI-NEXT: v_mov_b32_e32 v5, v19
-; SI-NEXT: v_mov_b32_e32 v7, v15
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(1)
@@ -234533,9 +234268,7 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v54, v50
-; SI-NEXT: v_mov_b32_e32 v56, v47
; SI-NEXT: v_mov_b32_e32 v9, v11
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v53, v5
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
@@ -234543,6 +234276,8 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v56, v47
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v40, v3
; SI-NEXT: v_mov_b32_e32 v44, v15
; SI-NEXT: v_mov_b32_e32 v57, v13
@@ -234850,16 +234585,18 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; SI-NEXT: v_lshr_b64 v[51:52], v[25:26], 16
; SI-NEXT: v_lshr_b64 v[52:53], v[1:2], 16
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
; SI-NEXT: v_alignbit_b32 v16, v45, v16, 16
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27
; SI-NEXT: v_alignbit_b32 v28, v58, v27, 16
@@ -234917,19 +234654,14 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16
; SI-NEXT: .LBB105_5: ; %end
-; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v52
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
@@ -234955,12 +234687,11 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v1, v1, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0
@@ -234985,12 +234716,11 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v1, v1, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v25
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0
@@ -240180,38 +239910,39 @@ define <64 x i16> @bitcast_v64f16_to_v64i16(<64 x half> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:92
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:88
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v9
; SI-NEXT: v_cvt_f16_f32_e32 v43, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v55, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v40, v8
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v5
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v2
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v22
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v40, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v9, v14
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v6, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v9, v14
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v12, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v19
; SI-NEXT: v_cvt_f16_f32_e32 v19, v23
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v23, v25
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v13
; SI-NEXT: v_cvt_f16_f32_e32 v44, v4
; SI-NEXT: v_cvt_f16_f32_e32 v52, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v15
; SI-NEXT: v_cvt_f16_f32_e32 v48, v16
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v15
; SI-NEXT: v_cvt_f16_f32_e32 v4, v17
; SI-NEXT: v_cvt_f16_f32_e32 v13, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v24
@@ -240222,7 +239953,6 @@ define <64 x i16> @bitcast_v64f16_to_v64i16(<64 x half> %a, i32 %b) {
; SI-NEXT: v_cvt_f16_f32_e32 v31, v27
; SI-NEXT: v_cvt_f16_f32_e32 v25, v50
; SI-NEXT: v_cvt_f16_f32_e32 v27, v29
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v6, v42
; SI-NEXT: v_cvt_f16_f32_e32 v21, v47
; SI-NEXT: v_cvt_f16_f32_e32 v22, v38
@@ -241300,10 +241030,12 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v50
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
@@ -241315,7 +241047,24 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v3, v26
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v49
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v54
+; SI-NEXT: v_mov_b32_e32 v54, v15
+; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v12
+; SI-NEXT: v_mov_b32_e32 v12, v42
+; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
@@ -241325,8 +241074,13 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v26, v3, v5
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v3, v22
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v49
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
@@ -241335,39 +241089,22 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v22, v3, v5
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v3, v18
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v49
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v54
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v18, v3, v5
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v3, v16
-; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_mov_b32_e32 v54, v15
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v12
-; SI-NEXT: v_mov_b32_e32 v12, v42
-; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
@@ -241385,8 +241122,6 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v14, v3, v5
@@ -241430,11 +241165,6 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v49
-; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v50
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
@@ -241571,27 +241301,27 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v12, v50, v1
; SI-NEXT: v_lshr_b64 v[49:50], v[35:36], 16
-; SI-NEXT: v_mov_b32_e32 v35, v44
-; SI-NEXT: v_lshr_b64 v[44:45], v[25:26], 16
; SI-NEXT: v_lshr_b64 v[50:51], v[21:22], 16
-; SI-NEXT: v_lshr_b64 v[24:25], v[17:18], 16
; SI-NEXT: v_lshr_b64 v[20:21], v[42:43], 16
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshr_b64 v[20:21], v[9:10], 16
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: v_mov_b32_e32 v35, v44
+; SI-NEXT: v_lshr_b64 v[44:45], v[25:26], 16
+; SI-NEXT: v_lshr_b64 v[24:25], v[17:18], 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshr_b64 v[20:21], v[40:41], 16
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshr_b64 v[12:13], v[13:14], 16
; SI-NEXT: v_lshr_b64 v[24:25], v[3:4], 16
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshr_b64 v[20:21], v[1:2], 16
; SI-NEXT: v_mov_b32_e32 v42, v61
; SI-NEXT: v_mov_b32_e32 v61, v37
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
index 9041f64..5b42f95 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
@@ -17964,14 +17964,6 @@ define <20 x i16> @bitcast_v40i8_to_v20i16(<40 x i8> %a, i32 %b) {
; VI-LABEL: bitcast_v40i8_to_v20i16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v34, v10
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v35, v6
@@ -17988,6 +17980,14 @@ define <20 x i16> @bitcast_v40i8_to_v20i16(<40 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v31, v14
; VI-NEXT: v_mov_b32_e32 v37, v12
; VI-NEXT: v_lshlrev_b16_e32 v39, 8, v1
@@ -18005,17 +18005,15 @@ define <20 x i16> @bitcast_v40i8_to_v20i16(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v25, 8, v25
; VI-NEXT: v_lshlrev_b16_e32 v27, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v29, 8, v29
-; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v43, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(8)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_lshlrev_b16_e32 v47, 8, v4
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_lshlrev_b16_e32 v46, 8, v6
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(12)
; VI-NEXT: v_lshlrev_b16_e32 v44, 8, v8
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(11)
; VI-NEXT: v_lshlrev_b16_e32 v45, 8, v10
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -18046,7 +18044,7 @@ define <20 x i16> @bitcast_v40i8_to_v20i16(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v7, v28, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v30, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(8)
; VI-NEXT: v_or_b32_sdwa v8, v51, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v53, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -18101,14 +18099,14 @@ define <20 x i16> @bitcast_v40i8_to_v20i16(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_mov_b32_e32 v1, 0x300
; VI-NEXT: v_add_u16_sdwa v9, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(10)
; VI-NEXT: v_add_u16_e32 v0, 3, v54
; VI-NEXT: v_or_b32_sdwa v10, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(9)
; VI-NEXT: v_add_u16_e32 v0, 3, v53
; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v8, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(8)
; VI-NEXT: v_add_u16_e32 v0, 3, v51
; VI-NEXT: v_or_b32_sdwa v11, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_e32 v0, 3, v30
@@ -23918,18 +23916,6 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; SI-LABEL: bitcast_v40i8_to_v20f16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v36, v4
; SI-NEXT: v_mov_b32_e32 v31, v2
; SI-NEXT: v_mov_b32_e32 v35, v0
@@ -23943,6 +23929,18 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; SI-NEXT: v_lshlrev_b32_e32 v37, 8, v1
; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v3
; SI-NEXT: v_lshlrev_b32_e32 v39, 8, v5
@@ -23974,20 +23972,16 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr15
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: s_waitcnt vmcnt(9) expcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v47, 8, v0
-; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v59, 8, v4
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v58, 8, v32
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v56, 8, v33
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v34
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: ; implicit-def: $vgpr32
@@ -24027,7 +24021,7 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v6, 0xff, v30
; SI-NEXT: v_or_b32_e32 v6, v6, v47
; SI-NEXT: v_cvt_f32_f16_e32 v15, v6
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_and_b32_e32 v6, 0xff, v50
; SI-NEXT: v_or_b32_e32 v6, v6, v56
; SI-NEXT: v_cvt_f32_f16_e32 v32, v6
@@ -24105,18 +24099,17 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_or_b32_e32 v0, v59, v0
; SI-NEXT: v_add_i32_e32 v19, vcc, 0x300, v0
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_movk_i32 s6, 0x300
; SI-NEXT: v_or_b32_e32 v0, v58, v0
; SI-NEXT: v_add_i32_e32 v34, vcc, s6, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_or_b32_e32 v0, v57, v0
; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_or_b32_e32 v0, v56, v0
@@ -24232,14 +24225,6 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; VI-LABEL: bitcast_v40i8_to_v20f16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v34, v10
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v35, v6
@@ -24256,6 +24241,14 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v31, v14
; VI-NEXT: v_mov_b32_e32 v37, v12
; VI-NEXT: v_lshlrev_b16_e32 v39, 8, v1
@@ -24273,17 +24266,15 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v25, 8, v25
; VI-NEXT: v_lshlrev_b16_e32 v27, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v29, 8, v29
-; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v43, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(8)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_lshlrev_b16_e32 v47, 8, v4
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_lshlrev_b16_e32 v46, 8, v6
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(12)
; VI-NEXT: v_lshlrev_b16_e32 v44, 8, v8
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(11)
; VI-NEXT: v_lshlrev_b16_e32 v45, 8, v10
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -24314,7 +24305,7 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v7, v28, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v30, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(8)
; VI-NEXT: v_or_b32_sdwa v8, v51, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v53, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -24369,14 +24360,14 @@ define <20 x half> @bitcast_v40i8_to_v20f16(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_mov_b32_e32 v1, 0x300
; VI-NEXT: v_add_u16_sdwa v9, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(10)
; VI-NEXT: v_add_u16_e32 v0, 3, v54
; VI-NEXT: v_or_b32_sdwa v10, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(9)
; VI-NEXT: v_add_u16_e32 v0, 3, v53
; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v8, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(8)
; VI-NEXT: v_add_u16_e32 v0, 3, v51
; VI-NEXT: v_or_b32_sdwa v11, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_e32 v0, 3, v30
@@ -28252,15 +28243,6 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; SI-LABEL: bitcast_v40i8_to_v5f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v36, v10
; SI-NEXT: v_mov_b32_e32 v35, v8
; SI-NEXT: v_mov_b32_e32 v34, v6
@@ -28277,6 +28259,15 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v37, v12
; SI-NEXT: s_waitcnt expcnt(0)
@@ -28295,17 +28286,14 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v52, 8, v25
; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v27
; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v29
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v0
-; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v4
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v6
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v8
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v10
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -28368,7 +28356,7 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
; SI-NEXT: v_or_b32_e32 v8, v25, v8
; SI-NEXT: v_or_b32_e32 v7, v7, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_and_b32_e32 v8, 0xff, v50
; SI-NEXT: v_and_b32_e32 v9, 0xff, v49
; SI-NEXT: v_or_b32_e32 v8, v8, v23
@@ -28508,7 +28496,7 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
; SI-NEXT: v_or_b32_e32 v8, v25, v8
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v8, 0xff, v8
; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v49
@@ -28557,15 +28545,6 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; VI-LABEL: bitcast_v40i8_to_v5f64:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v36, v10
; VI-NEXT: v_mov_b32_e32 v35, v8
; VI-NEXT: v_mov_b32_e32 v34, v6
@@ -28582,6 +28561,15 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v38, v14
; VI-NEXT: v_mov_b32_e32 v37, v12
; VI-NEXT: v_lshlrev_b16_e32 v56, 8, v1
@@ -28599,17 +28587,14 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v52, 8, v25
; VI-NEXT: v_lshlrev_b16_e32 v51, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v27, 8, v29
-; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v25, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(8)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_lshlrev_b16_e32 v17, 8, v4
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b16_e32 v19, 8, v6
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_lshlrev_b16_e32 v23, 8, v8
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(12)
; VI-NEXT: v_lshlrev_b16_e32 v21, 8, v10
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -28640,7 +28625,7 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v7, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v30, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(9)
; VI-NEXT: v_or_b32_sdwa v8, v50, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v49, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -28748,7 +28733,7 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_add_u16_e32 v7, 0x300, v7
; VI-NEXT: v_add_u16_sdwa v8, v8, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v7, v7, v8
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(9)
; VI-NEXT: v_add_u16_e32 v8, 3, v50
; VI-NEXT: v_add_u16_e32 v10, 3, v49
; VI-NEXT: v_or_b32_sdwa v8, v23, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -28780,15 +28765,6 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; GFX9-LABEL: bitcast_v40i8_to_v5f64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v36, v10
; GFX9-NEXT: v_mov_b32_e32 v35, v8
; GFX9-NEXT: v_mov_b32_e32 v34, v6
@@ -28805,6 +28781,16 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:20
; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:12
; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:4
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v38, v14
; GFX9-NEXT: v_mov_b32_e32 v37, v12
; GFX9-NEXT: v_lshlrev_b16_e32 v56, 8, v1
@@ -28822,17 +28808,17 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v52, 8, v25
; GFX9-NEXT: v_lshlrev_b16_e32 v51, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v27, 8, v29
-; GFX9-NEXT: s_waitcnt vmcnt(9)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v25, 8, v0
-; GFX9-NEXT: s_waitcnt vmcnt(8)
+; GFX9-NEXT: s_waitcnt vmcnt(17)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: s_waitcnt vmcnt(16)
; GFX9-NEXT: v_lshlrev_b16_e32 v17, 8, v4
-; GFX9-NEXT: s_waitcnt vmcnt(5)
+; GFX9-NEXT: s_waitcnt vmcnt(14)
; GFX9-NEXT: v_lshlrev_b16_e32 v19, 8, v6
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_lshlrev_b16_e32 v23, 8, v8
-; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: s_waitcnt vmcnt(12)
; GFX9-NEXT: v_lshlrev_b16_e32 v21, 8, v10
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -28863,7 +28849,7 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_sdwa v7, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v30, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(9)
; GFX9-NEXT: v_or_b32_sdwa v8, v50, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v49, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -28971,7 +28957,7 @@ define <5 x double> @bitcast_v40i8_to_v5f64(<40 x i8> %a, i32 %b) {
; GFX9-NEXT: v_add_u16_e32 v7, 0x300, v7
; GFX9-NEXT: v_add_u16_sdwa v8, v8, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v7, v7, v8
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(9)
; GFX9-NEXT: v_add_u16_e32 v8, 3, v50
; GFX9-NEXT: v_add_u16_e32 v9, 3, v49
; GFX9-NEXT: v_or_b32_sdwa v8, v23, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -32301,15 +32287,6 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; SI-LABEL: bitcast_v40i8_to_v5i64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v36, v10
; SI-NEXT: v_mov_b32_e32 v35, v8
; SI-NEXT: v_mov_b32_e32 v34, v6
@@ -32326,6 +32303,15 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v37, v12
; SI-NEXT: s_waitcnt expcnt(0)
@@ -32344,17 +32330,14 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v52, 8, v25
; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v27
; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v29
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v0
-; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v4
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v6
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v8
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v10
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -32417,7 +32400,7 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
; SI-NEXT: v_or_b32_e32 v8, v25, v8
; SI-NEXT: v_or_b32_e32 v7, v7, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_and_b32_e32 v8, 0xff, v50
; SI-NEXT: v_and_b32_e32 v9, 0xff, v49
; SI-NEXT: v_or_b32_e32 v8, v8, v23
@@ -32557,7 +32540,7 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
; SI-NEXT: v_or_b32_e32 v8, v25, v8
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v8, 0xff, v8
; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v49
@@ -32606,15 +32589,6 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; VI-LABEL: bitcast_v40i8_to_v5i64:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v36, v10
; VI-NEXT: v_mov_b32_e32 v35, v8
; VI-NEXT: v_mov_b32_e32 v34, v6
@@ -32631,6 +32605,15 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v38, v14
; VI-NEXT: v_mov_b32_e32 v37, v12
; VI-NEXT: v_lshlrev_b16_e32 v56, 8, v1
@@ -32648,17 +32631,14 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v52, 8, v25
; VI-NEXT: v_lshlrev_b16_e32 v51, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v27, 8, v29
-; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b16_e32 v25, 8, v0
-; VI-NEXT: s_waitcnt vmcnt(8)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: s_waitcnt vmcnt(7)
; VI-NEXT: v_lshlrev_b16_e32 v17, 8, v4
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_lshlrev_b16_e32 v19, 8, v6
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: s_waitcnt vmcnt(13)
; VI-NEXT: v_lshlrev_b16_e32 v23, 8, v8
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: s_waitcnt vmcnt(12)
; VI-NEXT: v_lshlrev_b16_e32 v21, 8, v10
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -32689,7 +32669,7 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_or_b32_sdwa v7, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v30, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(9)
; VI-NEXT: v_or_b32_sdwa v8, v50, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v49, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -32797,7 +32777,7 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; VI-NEXT: v_add_u16_e32 v7, 0x300, v7
; VI-NEXT: v_add_u16_sdwa v8, v8, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v7, v7, v8
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_waitcnt vmcnt(9)
; VI-NEXT: v_add_u16_e32 v8, 3, v50
; VI-NEXT: v_add_u16_e32 v10, 3, v49
; VI-NEXT: v_or_b32_sdwa v8, v23, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -32829,15 +32809,6 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; GFX9-LABEL: bitcast_v40i8_to_v5i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v36, v10
; GFX9-NEXT: v_mov_b32_e32 v35, v8
; GFX9-NEXT: v_mov_b32_e32 v34, v6
@@ -32854,6 +32825,16 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:20
; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:12
; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:4
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v38, v14
; GFX9-NEXT: v_mov_b32_e32 v37, v12
; GFX9-NEXT: v_lshlrev_b16_e32 v56, 8, v1
@@ -32871,17 +32852,17 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v52, 8, v25
; GFX9-NEXT: v_lshlrev_b16_e32 v51, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v27, 8, v29
-; GFX9-NEXT: s_waitcnt vmcnt(9)
+; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b16_e32 v25, 8, v0
-; GFX9-NEXT: s_waitcnt vmcnt(8)
+; GFX9-NEXT: s_waitcnt vmcnt(17)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: s_waitcnt vmcnt(16)
; GFX9-NEXT: v_lshlrev_b16_e32 v17, 8, v4
-; GFX9-NEXT: s_waitcnt vmcnt(5)
+; GFX9-NEXT: s_waitcnt vmcnt(14)
; GFX9-NEXT: v_lshlrev_b16_e32 v19, 8, v6
-; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_lshlrev_b16_e32 v23, 8, v8
-; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: s_waitcnt vmcnt(12)
; GFX9-NEXT: v_lshlrev_b16_e32 v21, 8, v10
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -32912,7 +32893,7 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; GFX9-NEXT: v_or_b32_sdwa v7, v28, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v30, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(9)
; GFX9-NEXT: v_or_b32_sdwa v8, v50, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v49, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -33020,7 +33001,7 @@ define <5 x i64> @bitcast_v40i8_to_v5i64(<40 x i8> %a, i32 %b) {
; GFX9-NEXT: v_add_u16_e32 v7, 0x300, v7
; GFX9-NEXT: v_add_u16_sdwa v8, v8, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v7, v7, v8
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(9)
; GFX9-NEXT: v_add_u16_e32 v8, 3, v50
; GFX9-NEXT: v_add_u16_e32 v9, 3, v49
; GFX9-NEXT: v_or_b32_sdwa v8, v23, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index ee23420..c8d1762 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -2406,13 +2406,13 @@ define <16 x i32> @bitcast_v32i16_to_v16i32(<32 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v32i16_to_v16i32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v32, v2
; SI-NEXT: v_mov_b32_e32 v31, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v37, v12
; SI-NEXT: v_mov_b32_e32 v36, v10
@@ -2435,9 +2435,9 @@ define <16 x i32> @bitcast_v32i16_to_v16i32(<32 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v2
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -11440,11 +11440,6 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v47
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:124
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:100
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:92
@@ -11453,6 +11448,11 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:68
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:124
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -11484,7 +11484,6 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v11, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v43, v11
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v12, 0xff, v58
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v54, v12
@@ -11723,7 +11722,6 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v11, 0xff, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v43, v11
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v58
; SI-NEXT: v_and_b32_e32 v12, 0xff, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
@@ -11972,11 +11970,11 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:112
; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:120
; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:128
+; VI-NEXT: v_lshlrev_b16_e32 v28, 8, v25
+; VI-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v20, 8, v19
; VI-NEXT: v_lshlrev_b16_e32 v22, 8, v21
; VI-NEXT: v_lshlrev_b16_e32 v24, 8, v23
-; VI-NEXT: v_lshlrev_b16_e32 v28, 8, v25
-; VI-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v29
; VI-NEXT: v_lshlrev_b16_e32 v18, 8, v17
; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:124
@@ -12016,16 +12014,9 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v25, 8, v53
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_lshlrev_b16_e32 v21, 8, v40
-; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
-; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_lshlrev_b16_e32 v27, 8, v41
-; VI-NEXT: s_waitcnt vmcnt(8)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshlrev_b16_e32 v60, 8, v45
; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:108
; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:100
@@ -12035,6 +12026,13 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:68
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -12044,11 +12042,10 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_or_b32_sdwa v9, v40, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v10, v49, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v23, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v12, v38, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v58, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -12211,7 +12208,7 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v15, 0x300
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_add_u16_e32 v9, 3, v40
; VI-NEXT: v_or_b32_sdwa v9, v57, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v9, v9, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -12221,7 +12218,6 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; VI-NEXT: v_add_u16_e32 v11, 3, v23
; VI-NEXT: v_or_b32_sdwa v11, v42, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v11, v11, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_add_u16_e32 v12, 3, v38
; VI-NEXT: v_or_b32_sdwa v12, v54, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v12, v12, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -12428,11 +12424,11 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:112
; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:120
; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:128
+; GFX9-NEXT: v_lshlrev_b16_e32 v28, 8, v25
+; GFX9-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v20, 8, v19
; GFX9-NEXT: v_lshlrev_b16_e32 v22, 8, v21
; GFX9-NEXT: v_lshlrev_b16_e32 v24, 8, v23
-; GFX9-NEXT: v_lshlrev_b16_e32 v28, 8, v25
-; GFX9-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v29
; GFX9-NEXT: v_lshlrev_b16_e32 v18, 8, v17
; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:124
@@ -12476,16 +12472,9 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v25, 8, v53
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_lshlrev_b16_e32 v21, 8, v40
-; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
-; GFX9-NEXT: s_waitcnt vmcnt(9)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_lshlrev_b16_e32 v27, 8, v41
-; GFX9-NEXT: s_waitcnt vmcnt(8)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshlrev_b16_e32 v60, 8, v45
; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:108
; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:100
@@ -12495,6 +12484,13 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:68
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -12504,11 +12500,10 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v9, v40, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v49, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v23, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_or_b32_sdwa v12, v38, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v58, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -12671,7 +12666,7 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; GFX9-NEXT: s_movk_i32 s6, 0x300
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_add_u16_e32 v9, 3, v40
; GFX9-NEXT: v_or_b32_sdwa v9, v57, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v9, v9, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -12681,7 +12676,6 @@ define <16 x i32> @bitcast_v64i8_to_v16i32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: v_add_u16_e32 v11, 3, v23
; GFX9-NEXT: v_or_b32_sdwa v11, v42, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v11, v11, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_add_u16_e32 v12, 3, v38
; GFX9-NEXT: v_or_b32_sdwa v12, v54, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v12, v12, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -17323,13 +17317,13 @@ define <16 x float> @bitcast_v32i16_to_v16f32(<32 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v32i16_to_v16f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v32, v2
; SI-NEXT: v_mov_b32_e32 v31, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v37, v12
; SI-NEXT: v_mov_b32_e32 v36, v10
@@ -17352,9 +17346,9 @@ define <16 x float> @bitcast_v32i16_to_v16f32(<32 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v2
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -26452,11 +26446,6 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v47
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:124
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:100
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:92
@@ -26465,6 +26454,11 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:68
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:124
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -26496,7 +26490,6 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v11, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v43, v11
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v12, 0xff, v58
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v54, v12
@@ -26735,7 +26728,6 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v11, 0xff, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v43, v11
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v58
; SI-NEXT: v_and_b32_e32 v12, 0xff, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
@@ -26984,11 +26976,11 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:112
; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:120
; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:128
+; VI-NEXT: v_lshlrev_b16_e32 v28, 8, v25
+; VI-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v20, 8, v19
; VI-NEXT: v_lshlrev_b16_e32 v22, 8, v21
; VI-NEXT: v_lshlrev_b16_e32 v24, 8, v23
-; VI-NEXT: v_lshlrev_b16_e32 v28, 8, v25
-; VI-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v29
; VI-NEXT: v_lshlrev_b16_e32 v18, 8, v17
; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:124
@@ -27028,16 +27020,9 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v25, 8, v53
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_lshlrev_b16_e32 v21, 8, v40
-; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
-; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_lshlrev_b16_e32 v27, 8, v41
-; VI-NEXT: s_waitcnt vmcnt(8)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshlrev_b16_e32 v60, 8, v45
; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:108
; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:100
@@ -27047,6 +27032,13 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:68
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -27056,11 +27048,10 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_or_b32_sdwa v9, v40, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v10, v49, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v23, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v12, v38, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v58, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -27223,7 +27214,7 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v15, 0x300
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_add_u16_e32 v9, 3, v40
; VI-NEXT: v_or_b32_sdwa v9, v57, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v9, v9, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -27233,7 +27224,6 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; VI-NEXT: v_add_u16_e32 v11, 3, v23
; VI-NEXT: v_or_b32_sdwa v11, v42, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v11, v11, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_add_u16_e32 v12, 3, v38
; VI-NEXT: v_or_b32_sdwa v12, v54, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v12, v12, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -27440,11 +27430,11 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:112
; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:120
; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:128
+; GFX9-NEXT: v_lshlrev_b16_e32 v28, 8, v25
+; GFX9-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v20, 8, v19
; GFX9-NEXT: v_lshlrev_b16_e32 v22, 8, v21
; GFX9-NEXT: v_lshlrev_b16_e32 v24, 8, v23
-; GFX9-NEXT: v_lshlrev_b16_e32 v28, 8, v25
-; GFX9-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v29
; GFX9-NEXT: v_lshlrev_b16_e32 v18, 8, v17
; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:124
@@ -27488,16 +27478,9 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v25, 8, v53
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_lshlrev_b16_e32 v21, 8, v40
-; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
-; GFX9-NEXT: s_waitcnt vmcnt(9)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_lshlrev_b16_e32 v27, 8, v41
-; GFX9-NEXT: s_waitcnt vmcnt(8)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshlrev_b16_e32 v60, 8, v45
; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:108
; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:100
@@ -27507,6 +27490,13 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:68
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -27516,11 +27506,10 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v9, v40, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v49, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v23, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_or_b32_sdwa v12, v38, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v58, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -27683,7 +27672,7 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; GFX9-NEXT: s_movk_i32 s6, 0x300
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_add_u16_e32 v9, 3, v40
; GFX9-NEXT: v_or_b32_sdwa v9, v57, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v9, v9, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -27693,7 +27682,6 @@ define <16 x float> @bitcast_v64i8_to_v16f32(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: v_add_u16_e32 v11, 3, v23
; GFX9-NEXT: v_or_b32_sdwa v11, v42, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v11, v11, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_add_u16_e32 v12, 3, v38
; GFX9-NEXT: v_or_b32_sdwa v12, v54, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v12, v12, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -31688,13 +31676,13 @@ define <8 x i64> @bitcast_v32i16_to_v8i64(<32 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v32i16_to_v8i64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v32, v2
; SI-NEXT: v_mov_b32_e32 v31, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v37, v12
; SI-NEXT: v_mov_b32_e32 v36, v10
@@ -31717,9 +31705,9 @@ define <8 x i64> @bitcast_v32i16_to_v8i64(<32 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v2
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -40740,11 +40728,6 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v47
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:124
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:100
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:92
@@ -40753,6 +40736,11 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:68
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:124
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -40784,7 +40772,6 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v11, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v43, v11
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v12, 0xff, v58
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v54, v12
@@ -41023,7 +41010,6 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v11, 0xff, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v43, v11
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v58
; SI-NEXT: v_and_b32_e32 v12, 0xff, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
@@ -41272,11 +41258,11 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:112
; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:120
; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:128
+; VI-NEXT: v_lshlrev_b16_e32 v28, 8, v25
+; VI-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v20, 8, v19
; VI-NEXT: v_lshlrev_b16_e32 v22, 8, v21
; VI-NEXT: v_lshlrev_b16_e32 v24, 8, v23
-; VI-NEXT: v_lshlrev_b16_e32 v28, 8, v25
-; VI-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v29
; VI-NEXT: v_lshlrev_b16_e32 v18, 8, v17
; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:124
@@ -41316,16 +41302,9 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v25, 8, v53
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_lshlrev_b16_e32 v21, 8, v40
-; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
-; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_lshlrev_b16_e32 v27, 8, v41
-; VI-NEXT: s_waitcnt vmcnt(8)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshlrev_b16_e32 v60, 8, v45
; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:108
; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:100
@@ -41335,6 +41314,13 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:68
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -41344,11 +41330,10 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_or_b32_sdwa v9, v40, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v10, v49, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v23, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v12, v38, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v58, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -41511,7 +41496,7 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v15, 0x300
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_add_u16_e32 v9, 3, v40
; VI-NEXT: v_or_b32_sdwa v9, v57, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v9, v9, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -41521,7 +41506,6 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; VI-NEXT: v_add_u16_e32 v11, 3, v23
; VI-NEXT: v_or_b32_sdwa v11, v42, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v11, v11, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_add_u16_e32 v12, 3, v38
; VI-NEXT: v_or_b32_sdwa v12, v54, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v12, v12, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -41728,11 +41712,11 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:112
; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:120
; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:128
+; GFX9-NEXT: v_lshlrev_b16_e32 v28, 8, v25
+; GFX9-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v20, 8, v19
; GFX9-NEXT: v_lshlrev_b16_e32 v22, 8, v21
; GFX9-NEXT: v_lshlrev_b16_e32 v24, 8, v23
-; GFX9-NEXT: v_lshlrev_b16_e32 v28, 8, v25
-; GFX9-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v29
; GFX9-NEXT: v_lshlrev_b16_e32 v18, 8, v17
; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:124
@@ -41776,16 +41760,9 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v25, 8, v53
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_lshlrev_b16_e32 v21, 8, v40
-; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
-; GFX9-NEXT: s_waitcnt vmcnt(9)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_lshlrev_b16_e32 v27, 8, v41
-; GFX9-NEXT: s_waitcnt vmcnt(8)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshlrev_b16_e32 v60, 8, v45
; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:108
; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:100
@@ -41795,6 +41772,13 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:68
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -41804,11 +41788,10 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v9, v40, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v49, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v23, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_or_b32_sdwa v12, v38, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v58, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -41971,7 +41954,7 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; GFX9-NEXT: s_movk_i32 s6, 0x300
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_add_u16_e32 v9, 3, v40
; GFX9-NEXT: v_or_b32_sdwa v9, v57, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v9, v9, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -41981,7 +41964,6 @@ define <8 x i64> @bitcast_v64i8_to_v8i64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: v_add_u16_e32 v11, 3, v23
; GFX9-NEXT: v_or_b32_sdwa v11, v42, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v11, v11, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_add_u16_e32 v12, 3, v38
; GFX9-NEXT: v_or_b32_sdwa v12, v54, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v12, v12, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -45317,13 +45299,13 @@ define <8 x double> @bitcast_v32i16_to_v8f64(<32 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v32i16_to_v8f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v32, v2
; SI-NEXT: v_mov_b32_e32 v31, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v37, v12
; SI-NEXT: v_mov_b32_e32 v36, v10
@@ -45346,9 +45328,9 @@ define <8 x double> @bitcast_v32i16_to_v8f64(<32 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v2
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -54188,11 +54170,6 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v47
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:124
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:100
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:92
@@ -54201,6 +54178,11 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:68
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:124
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -54232,7 +54214,6 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v11, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v43, v11
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v12, 0xff, v58
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v54, v12
@@ -54471,7 +54452,6 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v11, 0xff, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v43, v11
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v58
; SI-NEXT: v_and_b32_e32 v12, 0xff, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
@@ -54720,11 +54700,11 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:112
; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:120
; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:128
+; VI-NEXT: v_lshlrev_b16_e32 v28, 8, v25
+; VI-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v20, 8, v19
; VI-NEXT: v_lshlrev_b16_e32 v22, 8, v21
; VI-NEXT: v_lshlrev_b16_e32 v24, 8, v23
-; VI-NEXT: v_lshlrev_b16_e32 v28, 8, v25
-; VI-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; VI-NEXT: v_lshlrev_b16_e32 v63, 8, v29
; VI-NEXT: v_lshlrev_b16_e32 v18, 8, v17
; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:124
@@ -54764,16 +54744,9 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; VI-NEXT: v_lshlrev_b16_e32 v25, 8, v53
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_lshlrev_b16_e32 v21, 8, v40
-; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
-; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
-; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_lshlrev_b16_e32 v27, 8, v41
-; VI-NEXT: s_waitcnt vmcnt(8)
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshlrev_b16_e32 v60, 8, v45
; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:108
; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:100
@@ -54783,6 +54756,13 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:68
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -54792,11 +54772,10 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_or_b32_sdwa v9, v40, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v10, v49, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v23, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v12, v38, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v58, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -54959,7 +54938,7 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; VI-NEXT: v_mov_b32_e32 v15, 0x300
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_add_u16_e32 v9, 3, v40
; VI-NEXT: v_or_b32_sdwa v9, v57, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v9, v9, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -54969,7 +54948,6 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; VI-NEXT: v_add_u16_e32 v11, 3, v23
; VI-NEXT: v_or_b32_sdwa v11, v42, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v11, v11, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_add_u16_e32 v12, 3, v38
; VI-NEXT: v_or_b32_sdwa v12, v54, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u16_sdwa v12, v12, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -55176,11 +55154,11 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:112
; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:120
; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:128
+; GFX9-NEXT: v_lshlrev_b16_e32 v28, 8, v25
+; GFX9-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v20, 8, v19
; GFX9-NEXT: v_lshlrev_b16_e32 v22, 8, v21
; GFX9-NEXT: v_lshlrev_b16_e32 v24, 8, v23
-; GFX9-NEXT: v_lshlrev_b16_e32 v28, 8, v25
-; GFX9-NEXT: v_lshlrev_b16_e32 v30, 8, v27
; GFX9-NEXT: v_lshlrev_b16_e32 v63, 8, v29
; GFX9-NEXT: v_lshlrev_b16_e32 v18, 8, v17
; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:124
@@ -55224,16 +55202,9 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: v_lshlrev_b16_e32 v25, 8, v53
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_lshlrev_b16_e32 v21, 8, v40
-; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
-; GFX9-NEXT: s_waitcnt vmcnt(9)
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_lshlrev_b16_e32 v27, 8, v41
-; GFX9-NEXT: s_waitcnt vmcnt(8)
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshlrev_b16_e32 v60, 8, v45
; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:108
; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:100
@@ -55243,6 +55214,13 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:68
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:116
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -55252,11 +55230,10 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_or_b32_sdwa v9, v40, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v49, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v23, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_or_b32_sdwa v12, v38, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v58, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -55419,7 +55396,7 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; GFX9-NEXT: s_movk_i32 s6, 0x300
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_add_u16_e32 v9, 3, v40
; GFX9-NEXT: v_or_b32_sdwa v9, v57, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v9, v9, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -55429,7 +55406,6 @@ define <8 x double> @bitcast_v64i8_to_v8f64(<64 x i8> %a, i32 %b) {
; GFX9-NEXT: v_add_u16_e32 v11, 3, v23
; GFX9-NEXT: v_or_b32_sdwa v11, v42, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v11, v11, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(5)
; GFX9-NEXT: v_add_u16_e32 v12, 3, v38
; GFX9-NEXT: v_or_b32_sdwa v12, v54, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u16_sdwa v12, v12, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -60580,6 +60556,8 @@ define <32 x i16> @bitcast_v32bf16_to_v32i16(<32 x bfloat> %a, i32 %b) {
; SI-LABEL: bitcast_v32bf16_to_v32i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -60596,8 +60574,6 @@ define <32 x i16> @bitcast_v32bf16_to_v32i16(<32 x bfloat> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v63, 1.0, v0
; SI-NEXT: v_mul_f32_e32 v62, 1.0, v1
@@ -60661,9 +60637,8 @@ define <32 x i16> @bitcast_v32bf16_to_v32i16(<32 x bfloat> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr28
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mul_f32_e32 v55, 1.0, v55
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -64471,44 +64446,44 @@ define <64 x i8> @bitcast_v32i16_to_v64i8(<32 x i16> %a, i32 %b) {
; VI-NEXT: v_lshrrev_b32_e32 v17, 24, v14
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v14
+; VI-NEXT: v_lshrrev_b64 v[19:20], 24, v[15:16]
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v13
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 24, v12
+; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v12
+; VI-NEXT: v_lshrrev_b64 v[19:20], 24, v[13:14]
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v11
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 24, v10
+; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v10
+; VI-NEXT: v_lshrrev_b64 v[19:20], 24, v[11:12]
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 24, v8
+; VI-NEXT: v_lshrrev_b64 v[20:21], 24, v[9:10]
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v8
+; VI-NEXT: v_lshrrev_b64 v[21:22], 24, v[7:8]
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v7
+; VI-NEXT: v_lshrrev_b64 v[22:23], 24, v[5:6]
; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v6
-; VI-NEXT: v_lshrrev_b64 v[19:20], 24, v[15:16]
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v5
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[19:20], 24, v[13:14]
-; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; VI-NEXT: v_lshrrev_b64 v[19:20], 24, v[11:12]
-; VI-NEXT: v_lshrrev_b64 v[20:21], 24, v[9:10]
-; VI-NEXT: v_lshrrev_b64 v[21:22], 24, v[7:8]
-; VI-NEXT: v_lshrrev_b64 v[22:23], 24, v[5:6]
; VI-NEXT: v_lshrrev_b64 v[23:24], 24, v[3:4]
; VI-NEXT: v_lshrrev_b32_e32 v50, 24, v16
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; VI-NEXT: v_lshrrev_b32_e32 v17, 8, v5
; VI-NEXT: v_lshrrev_b64 v[24:25], 24, v[1:2]
; VI-NEXT: v_lshrrev_b32_e32 v42, 8, v9
; VI-NEXT: v_lshrrev_b32_e32 v43, 24, v6
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; VI-NEXT: v_lshrrev_b32_e32 v46, 24, v4
; VI-NEXT: v_lshrrev_b32_e32 v55, 8, v4
; VI-NEXT: v_lshrrev_b32_e32 v51, 8, v3
@@ -67768,17 +67743,61 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:84
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:100
; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v13
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v15
; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v11
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v9
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v7
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v23
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v17
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v21
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; SI-NEXT: v_lshlrev_b32_e32 v63, 24, v19
; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v27
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr7
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; kill: killed $vgpr7
+; SI-NEXT: ; implicit-def: $vgpr7
+; SI-NEXT: ; implicit-def: $vgpr9
+; SI-NEXT: ; implicit-def: $vgpr11
+; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr51
+; SI-NEXT: ; implicit-def: $vgpr15
+; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr52
; SI-NEXT: ; implicit-def: $vgpr19
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr53
+; SI-NEXT: ; implicit-def: $vgpr23
; SI-NEXT: ; implicit-def: $vgpr54
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr55
@@ -67793,25 +67812,24 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v31
; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v32
; SI-NEXT: v_lshlrev_b32_e32 v44, 8, v33
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v34
-; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v35
-; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v36
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v37
-; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v61, 24, v38
; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v25
-; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v39
-; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v45, 8, v48
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v49
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v50
; SI-NEXT: ; implicit-def: $vgpr37
; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr35
+; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; implicit-def: $vgpr33
+; SI-NEXT: ; implicit-def: $vgpr50
; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr34
; SI-NEXT: ; implicit-def: $vgpr36
@@ -67819,7 +67837,6 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
@@ -67833,57 +67850,8 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:4
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v13
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v15
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v11
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v9
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v7
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v23
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v17
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:12
; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v29
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v49
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v50
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr49
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr50
-; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: ; implicit-def: $vgpr23
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -67892,7 +67860,6 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v21, 0xff, v58
; SI-NEXT: v_or_b32_e32 v21, v21, v26
; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21
@@ -68173,7 +68140,6 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB98_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v18
; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
; SI-NEXT: v_or_b32_e32 v1, v1, v3
@@ -68198,7 +68164,6 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_or_b32_e32 v3, v59, v3
; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_add_i32_e32 v55, vcc, s7, v1
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v42
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v8
@@ -68222,7 +68187,6 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v54, vcc, s7, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v57
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
@@ -68430,8 +68394,6 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: .LBB98_4: ; %end
; SI-NEXT: s_or_b64 exec, exec, s[4:5]
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
@@ -68448,6 +68410,8 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v37
; SI-NEXT: v_mov_b32_e32 v2, v48
@@ -68458,7 +68422,6 @@ define <32 x i16> @bitcast_v64i8_to_v32i16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_mov_b32_e32 v12, v32
; SI-NEXT: v_mov_b32_e32 v14, v51
; SI-NEXT: v_mov_b32_e32 v16, v34
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_mov_b32_e32 v18, v52
; SI-NEXT: v_mov_b32_e32 v20, v36
; SI-NEXT: v_mov_b32_e32 v22, v53
@@ -70196,13 +70159,12 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v46, v30
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:24
-; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:48
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:44
@@ -70219,6 +70181,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: v_readfirstlane_b32 s43, v1
; SI-NEXT: v_readfirstlane_b32 s42, v0
; SI-NEXT: v_lshlrev_b32_e32 v42, 8, v3
@@ -70242,19 +70205,19 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36
; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v48
-; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v39
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v37
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v49
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v30
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v31
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v33
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v34
; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
@@ -70280,7 +70243,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(10) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(11) expcnt(0)
; SI-NEXT: v_mov_b32_e32 v60, v44
; SI-NEXT: v_or_b32_e32 v44, v53, v9
; SI-NEXT: v_or_b32_e32 v33, v1, v44
@@ -70725,12 +70688,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: .LBB99_3: ; %end
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
@@ -70747,6 +70704,12 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: s_waitcnt expcnt(0)
@@ -70758,11 +70721,13 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s5
; SI-NEXT: v_mov_b32_e32 v7, s11
; SI-NEXT: v_mov_b32_e32 v8, v37
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_mov_b32_e32 v10, v38
; SI-NEXT: v_mov_b32_e32 v12, v33
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_mov_b32_e32 v14, v34
; SI-NEXT: v_mov_b32_e32 v16, v48
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v18, v49
; SI-NEXT: v_mov_b32_e32 v20, v35
; SI-NEXT: v_mov_b32_e32 v22, v36
@@ -70770,7 +70735,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v26, v51
; SI-NEXT: v_mov_b32_e32 v28, v54
; SI-NEXT: v_mov_b32_e32 v30, v55
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB99_4:
; SI-NEXT: v_mov_b32_e32 v39, v32
@@ -72188,6 +72152,8 @@ define <32 x bfloat> @bitcast_v32f16_to_v32bf16(<32 x half> %a, i32 %b) {
; SI-LABEL: bitcast_v32f16_to_v32bf16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -72204,8 +72170,6 @@ define <32 x bfloat> @bitcast_v32f16_to_v32bf16(<32 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v33, v1
; SI-NEXT: v_cvt_f16_f32_e32 v34, v2
@@ -72273,9 +72237,8 @@ define <32 x bfloat> @bitcast_v32f16_to_v32bf16(<32 x half> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr28
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v63, v31
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32
; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr31
@@ -79163,13 +79126,12 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr75
; VI-NEXT: s_branch .LBB105_2
; VI-NEXT: .LBB105_4:
-; VI-NEXT: v_mov_b32_e32 v1, s58
; VI-NEXT: v_mov_b32_e32 v53, s56
; VI-NEXT: v_mov_b32_e32 v52, s42
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v52, s44
+; VI-NEXT: v_mov_b32_e32 v1, s58
; VI-NEXT: v_mov_b32_e32 v19, s67
; VI-NEXT: v_mov_b32_e32 v12, s66
; VI-NEXT: v_mov_b32_e32 v20, s65
@@ -79215,6 +79177,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v45, s78
; VI-NEXT: v_mov_b32_e32 v42, s76
; VI-NEXT: v_mov_b32_e32 v55, s74
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v54, s57
; VI-NEXT: v_mov_b32_e32 v41, s59
; VI-NEXT: v_mov_b32_e32 v44, s60
@@ -80286,6 +80249,14 @@ define <32 x half> @bitcast_v64i8_to_v32f16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v9
@@ -80360,19 +80331,10 @@ define <32 x half> @bitcast_v64i8_to_v32f16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:116
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52
; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v31
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v32
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v33
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v34
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v35, 8, v35
; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v36
; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v37
@@ -80390,7 +80352,7 @@ define <32 x half> @bitcast_v64i8_to_v32f16(<64 x i8> %a, i32 %b) {
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v19, 0xff, v55
; SI-NEXT: v_or_b32_e32 v16, v19, v16
; SI-NEXT: v_cvt_f32_f16_e32 v34, v16
@@ -80403,7 +80365,6 @@ define <32 x half> @bitcast_v64i8_to_v32f16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v12, 0xff, v18
; SI-NEXT: v_or_b32_e32 v10, v12, v10
; SI-NEXT: v_cvt_f32_f16_e32 v21, v10
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v10, 0xff, v41
; SI-NEXT: v_or_b32_e32 v8, v10, v8
; SI-NEXT: v_cvt_f32_f16_e32 v38, v8
@@ -80428,6 +80389,7 @@ define <32 x half> @bitcast_v64i8_to_v32f16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v0, 0xff, v56
; SI-NEXT: v_or_b32_e32 v0, v0, v3
; SI-NEXT: v_cvt_f32_f16_e32 v29, v0
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v0, 0xff, v6
; SI-NEXT: v_or_b32_e32 v0, v0, v46
; SI-NEXT: v_cvt_f32_f16_e32 v54, v0
@@ -80634,13 +80596,12 @@ define <32 x half> @bitcast_v64i8_to_v32f16(<64 x i8> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB106_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v56
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v6
; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
; SI-NEXT: v_and_b32_e32 v6, 0xff, v6
; SI-NEXT: v_or_b32_e32 v7, v3, v7
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v47
; SI-NEXT: v_or_b32_e32 v6, v46, v6
; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
@@ -80648,12 +80609,10 @@ define <32 x half> @bitcast_v64i8_to_v32f16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_or_b32_e32 v9, v35, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, s6, v6
; SI-NEXT: v_add_i32_e32 v6, vcc, s6, v7
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v42
; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
; SI-NEXT: v_or_b32_e32 v7, v39, v7
; SI-NEXT: v_add_i32_e32 v23, vcc, s6, v7
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v41
; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
@@ -80852,13 +80811,6 @@ define <32 x half> @bitcast_v64i8_to_v32f16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_cvt_f32_f16_e32 v31, v1
; SI-NEXT: .LBB106_4: ; %end
; SI-NEXT: s_or_b64 exec, exec, s[4:5]
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
@@ -80875,14 +80827,21 @@ define <32 x half> @bitcast_v64i8_to_v32f16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v8, v33
; SI-NEXT: v_mov_b32_e32 v10, v37
; SI-NEXT: v_mov_b32_e32 v12, v49
; SI-NEXT: v_mov_b32_e32 v14, v53
; SI-NEXT: v_mov_b32_e32 v16, v32
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_mov_b32_e32 v18, v34
; SI-NEXT: v_mov_b32_e32 v20, v36
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_mov_b32_e32 v22, v38
; SI-NEXT: v_mov_b32_e32 v24, v48
; SI-NEXT: v_mov_b32_e32 v26, v50
@@ -84461,22 +84420,6 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; SI-LABEL: bitcast_v32bf16_to_v64i8:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32
@@ -84542,6 +84485,22 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: v_mul_f32_e32 v36, 1.0, v2
; SI-NEXT: v_mul_f32_e32 v31, 1.0, v4
; SI-NEXT: v_mul_f32_e32 v35, 1.0, v3
@@ -84605,11 +84564,9 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr58
; SI-NEXT: ; implicit-def: $vgpr58
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v37
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_mul_f32_e32 v30, 1.0, v48
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mul_f32_e32 v29, 1.0, v50
; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr50
@@ -90429,6 +90386,8 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:92
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:36
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:116
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:124
; SI-NEXT: v_lshlrev_b32_e32 v63, 8, v13
; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v21
; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v27
@@ -90458,28 +90417,30 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v12
; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v17
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v20
-; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v24
-; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v28
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:72
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:40
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v31
-; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v46, 24, v32
-; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v33
-; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v35, 8, v34
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v61, 24, v36
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr34
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:20
; SI-NEXT: s_waitcnt expcnt(0)
@@ -90496,8 +90457,6 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v3
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:116
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:124
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v5
@@ -90513,16 +90472,8 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v19
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:40
; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v23
; SI-NEXT: ; kill: killed $vgpr3
@@ -90803,7 +90754,6 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB110_4
; SI-NEXT: ; %bb.3: ; %cmp.true
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v45
; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v16
@@ -90829,7 +90779,6 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_or_b32_e32 v5, v58, v5
; SI-NEXT: v_or_b32_e32 v3, v5, v3
; SI-NEXT: v_add_i32_e32 v9, vcc, s7, v3
-; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v60
; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v26
@@ -90841,7 +90790,6 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_or_b32_e32 v5, v46, v5
; SI-NEXT: v_or_b32_e32 v3, v5, v3
; SI-NEXT: v_add_i32_e32 v25, vcc, s7, v3
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v59
@@ -90854,7 +90802,6 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_or_b32_e32 v5, v12, v5
; SI-NEXT: v_or_b32_e32 v3, v5, v3
; SI-NEXT: v_add_i32_e32 v12, vcc, s7, v3
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v47
; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v20
@@ -90868,7 +90815,6 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_add_i32_e32 v8, vcc, s7, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v44
; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v28
; SI-NEXT: v_or_b32_e32 v3, v5, v3
; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v24
@@ -91086,11 +91032,8 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v6
; SI-NEXT: .LBB110_4: ; %end
; SI-NEXT: s_or_b64 exec, exec, s[4:5]
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v2, v43
; SI-NEXT: v_mov_b32_e32 v10, v41
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_mov_b32_e32 v28, v40
; SI-NEXT: v_mov_b32_e32 v30, v42
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
@@ -91109,6 +91052,8 @@ define <32 x bfloat> @bitcast_v64i8_to_v32bf16(<64 x i8> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v4, v33
; SI-NEXT: v_mov_b32_e32 v6, v39
; SI-NEXT: v_mov_b32_e32 v8, v51
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
index 5d4df4b..7bd2c7a 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
@@ -6164,6 +6164,14 @@ define <18 x i32> @bitcast_v36f16_to_v18i32(<36 x half> %a, i32 %b) {
; SI-LABEL: bitcast_v36f16_to_v18i32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
@@ -6180,36 +6188,28 @@ define <18 x i32> @bitcast_v36f16_to_v18i32(<36 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
; SI-NEXT: v_cvt_f16_f32_e32 v35, v1
; SI-NEXT: v_cvt_f16_f32_e32 v33, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v32, v2
; SI-NEXT: v_cvt_f16_f32_e32 v63, v5
; SI-NEXT: v_cvt_f16_f32_e32 v62, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v61, v7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v7
; SI-NEXT: v_cvt_f16_f32_e32 v60, v6
; SI-NEXT: v_cvt_f16_f32_e32 v59, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v8
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v8
; SI-NEXT: v_cvt_f16_f32_e32 v57, v11
; SI-NEXT: v_cvt_f16_f32_e32 v56, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v13
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v13
; SI-NEXT: v_cvt_f16_f32_e32 v46, v12
; SI-NEXT: v_cvt_f16_f32_e32 v45, v15
; SI-NEXT: v_cvt_f16_f32_e32 v44, v14
@@ -6224,14 +6224,12 @@ define <18 x i32> @bitcast_v36f16_to_v18i32(<36 x half> %a, i32 %b) {
; SI-NEXT: v_cvt_f16_f32_e32 v51, v25
; SI-NEXT: v_cvt_f16_f32_e32 v50, v24
; SI-NEXT: v_cvt_f16_f32_e32 v49, v27
-; SI-NEXT: s_waitcnt vmcnt(9) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(9) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v36
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_cvt_f16_f32_e32 v36, v39
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -13435,6 +13433,14 @@ define <18 x float> @bitcast_v36f16_to_v18f32(<36 x half> %a, i32 %b) {
; SI-LABEL: bitcast_v36f16_to_v18f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
@@ -13451,36 +13457,28 @@ define <18 x float> @bitcast_v36f16_to_v18f32(<36 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
; SI-NEXT: v_cvt_f16_f32_e32 v35, v1
; SI-NEXT: v_cvt_f16_f32_e32 v33, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v32, v2
; SI-NEXT: v_cvt_f16_f32_e32 v63, v5
; SI-NEXT: v_cvt_f16_f32_e32 v62, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v61, v7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v7
; SI-NEXT: v_cvt_f16_f32_e32 v60, v6
; SI-NEXT: v_cvt_f16_f32_e32 v59, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v8
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v8
; SI-NEXT: v_cvt_f16_f32_e32 v57, v11
; SI-NEXT: v_cvt_f16_f32_e32 v56, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v13
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v13
; SI-NEXT: v_cvt_f16_f32_e32 v46, v12
; SI-NEXT: v_cvt_f16_f32_e32 v45, v15
; SI-NEXT: v_cvt_f16_f32_e32 v44, v14
@@ -13495,14 +13493,12 @@ define <18 x float> @bitcast_v36f16_to_v18f32(<36 x half> %a, i32 %b) {
; SI-NEXT: v_cvt_f16_f32_e32 v51, v25
; SI-NEXT: v_cvt_f16_f32_e32 v50, v24
; SI-NEXT: v_cvt_f16_f32_e32 v49, v27
-; SI-NEXT: s_waitcnt vmcnt(9) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(9) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v36
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_cvt_f16_f32_e32 v36, v39
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -19656,6 +19652,14 @@ define <9 x i64> @bitcast_v36f16_to_v9i64(<36 x half> %a, i32 %b) {
; SI-LABEL: bitcast_v36f16_to_v9i64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
@@ -19672,36 +19676,28 @@ define <9 x i64> @bitcast_v36f16_to_v9i64(<36 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
; SI-NEXT: v_cvt_f16_f32_e32 v35, v1
; SI-NEXT: v_cvt_f16_f32_e32 v33, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v32, v2
; SI-NEXT: v_cvt_f16_f32_e32 v63, v5
; SI-NEXT: v_cvt_f16_f32_e32 v62, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v61, v7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v7
; SI-NEXT: v_cvt_f16_f32_e32 v60, v6
; SI-NEXT: v_cvt_f16_f32_e32 v59, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v8
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v8
; SI-NEXT: v_cvt_f16_f32_e32 v57, v11
; SI-NEXT: v_cvt_f16_f32_e32 v56, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v13
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v13
; SI-NEXT: v_cvt_f16_f32_e32 v46, v12
; SI-NEXT: v_cvt_f16_f32_e32 v45, v15
; SI-NEXT: v_cvt_f16_f32_e32 v44, v14
@@ -19716,14 +19712,12 @@ define <9 x i64> @bitcast_v36f16_to_v9i64(<36 x half> %a, i32 %b) {
; SI-NEXT: v_cvt_f16_f32_e32 v51, v25
; SI-NEXT: v_cvt_f16_f32_e32 v50, v24
; SI-NEXT: v_cvt_f16_f32_e32 v49, v27
-; SI-NEXT: s_waitcnt vmcnt(9) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(9) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v36
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_cvt_f16_f32_e32 v36, v39
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -25282,6 +25276,14 @@ define <9 x double> @bitcast_v36f16_to_v9f64(<36 x half> %a, i32 %b) {
; SI-LABEL: bitcast_v36f16_to_v9f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
@@ -25298,36 +25300,28 @@ define <9 x double> @bitcast_v36f16_to_v9f64(<36 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:20
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
; SI-NEXT: v_cvt_f16_f32_e32 v35, v1
; SI-NEXT: v_cvt_f16_f32_e32 v33, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v32, v2
; SI-NEXT: v_cvt_f16_f32_e32 v63, v5
; SI-NEXT: v_cvt_f16_f32_e32 v62, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v61, v7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v7
; SI-NEXT: v_cvt_f16_f32_e32 v60, v6
; SI-NEXT: v_cvt_f16_f32_e32 v59, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v8
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v8
; SI-NEXT: v_cvt_f16_f32_e32 v57, v11
; SI-NEXT: v_cvt_f16_f32_e32 v56, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v13
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v13
; SI-NEXT: v_cvt_f16_f32_e32 v46, v12
; SI-NEXT: v_cvt_f16_f32_e32 v45, v15
; SI-NEXT: v_cvt_f16_f32_e32 v44, v14
@@ -25342,14 +25336,12 @@ define <9 x double> @bitcast_v36f16_to_v9f64(<36 x half> %a, i32 %b) {
; SI-NEXT: v_cvt_f16_f32_e32 v51, v25
; SI-NEXT: v_cvt_f16_f32_e32 v50, v24
; SI-NEXT: v_cvt_f16_f32_e32 v49, v27
-; SI-NEXT: s_waitcnt vmcnt(9) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(9) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v36
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_cvt_f16_f32_e32 v36, v39
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -26798,22 +26790,6 @@ define <36 x half> @bitcast_v36i16_to_v36f16(<36 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v36i16_to_v36f16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:16
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:12
@@ -26838,6 +26814,22 @@ define <36 x half> @bitcast_v36i16_to_v36f16(<36 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; kill: killed $vgpr48
; SI-NEXT: ; implicit-def: $vgpr48
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: ; implicit-def: $vgpr62
; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr63
@@ -26865,7 +26857,7 @@ define <36 x half> @bitcast_v36i16_to_v36f16(<36 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr50
; SI-NEXT: ; kill: killed $vgpr48
; SI-NEXT: ; implicit-def: $vgpr48
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -26892,7 +26884,7 @@ define <36 x half> @bitcast_v36i16_to_v36f16(<36 x i16> %a, i32 %b) {
; SI-NEXT: v_cvt_f32_f16_e32 v47, v9
; SI-NEXT: v_cvt_f32_f16_e32 v60, v10
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v39
; SI-NEXT: v_cvt_f32_f16_e32 v45, v11
; SI-NEXT: v_cvt_f32_f16_e32 v58, v12
@@ -26977,7 +26969,6 @@ define <36 x half> @bitcast_v36i16_to_v36f16(<36 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v27
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v39, vcc, 3, v39
; SI-NEXT: v_add_i32_e32 v34, vcc, 3, v34
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
index 44cfd6c..8964ebd 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
@@ -3541,6 +3541,17 @@ define <20 x i32> @bitcast_v40i16_to_v20i32(<40 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v40i16_to_v20i32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
+; SI-NEXT: v_mov_b32_e32 v49, v12
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
@@ -3562,17 +3573,6 @@ define <20 x i32> @bitcast_v40i16_to_v20i32(<40 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
-; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: v_mov_b32_e32 v37, v20
; SI-NEXT: v_mov_b32_e32 v38, v18
; SI-NEXT: v_mov_b32_e32 v39, v16
@@ -3594,13 +3594,10 @@ define <20 x i32> @bitcast_v40i16_to_v20i32(<40 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v29
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
@@ -4914,7 +4911,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
@@ -4947,7 +4944,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
@@ -4980,7 +4977,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
@@ -5073,7 +5070,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
@@ -5106,7 +5103,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
@@ -5139,7 +5136,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
@@ -8520,7 +8517,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
@@ -8553,7 +8550,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
@@ -8586,7 +8583,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
@@ -8679,7 +8676,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
@@ -8712,7 +8709,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
@@ -8745,7 +8742,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
@@ -11740,6 +11737,17 @@ define <20 x float> @bitcast_v40i16_to_v20f32(<40 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v40i16_to_v20f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
+; SI-NEXT: v_mov_b32_e32 v49, v12
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
@@ -11761,17 +11769,6 @@ define <20 x float> @bitcast_v40i16_to_v20f32(<40 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
-; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: v_mov_b32_e32 v37, v20
; SI-NEXT: v_mov_b32_e32 v38, v18
; SI-NEXT: v_mov_b32_e32 v39, v16
@@ -11793,13 +11790,10 @@ define <20 x float> @bitcast_v40i16_to_v20f32(<40 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v29
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
@@ -13113,7 +13107,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
@@ -13146,7 +13140,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
@@ -13179,7 +13173,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
@@ -13272,7 +13266,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
@@ -13305,7 +13299,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
@@ -13338,7 +13332,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
@@ -16833,7 +16827,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
@@ -16866,7 +16860,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
@@ -16899,7 +16893,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
@@ -16992,7 +16986,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
@@ -17025,7 +17019,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
@@ -17058,7 +17052,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
@@ -19249,6 +19243,17 @@ define <10 x i64> @bitcast_v40i16_to_v10i64(<40 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v40i16_to_v10i64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
+; SI-NEXT: v_mov_b32_e32 v49, v12
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
@@ -19270,17 +19275,6 @@ define <10 x i64> @bitcast_v40i16_to_v10i64(<40 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
-; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: v_mov_b32_e32 v37, v20
; SI-NEXT: v_mov_b32_e32 v38, v18
; SI-NEXT: v_mov_b32_e32 v39, v16
@@ -19302,13 +19296,10 @@ define <10 x i64> @bitcast_v40i16_to_v10i64(<40 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v29
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
@@ -20622,7 +20613,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
@@ -20655,7 +20646,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
@@ -20688,7 +20679,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
@@ -20781,7 +20772,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
@@ -20814,7 +20805,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
@@ -20847,7 +20838,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
@@ -24238,7 +24229,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
@@ -24271,7 +24262,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
@@ -24304,7 +24295,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
@@ -24397,7 +24388,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
@@ -24430,7 +24421,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
@@ -24463,7 +24454,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
@@ -25988,6 +25979,17 @@ define <10 x double> @bitcast_v40i16_to_v10f64(<40 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v40i16_to_v10f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
+; SI-NEXT: v_mov_b32_e32 v49, v12
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
@@ -26009,17 +26011,6 @@ define <10 x double> @bitcast_v40i16_to_v10f64(<40 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
-; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: v_mov_b32_e32 v37, v20
; SI-NEXT: v_mov_b32_e32 v38, v18
; SI-NEXT: v_mov_b32_e32 v39, v16
@@ -26041,13 +26032,10 @@ define <10 x double> @bitcast_v40i16_to_v10f64(<40 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v29
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
@@ -27361,7 +27349,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
@@ -27394,7 +27382,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
@@ -27427,7 +27415,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
@@ -27520,7 +27508,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
@@ -27553,7 +27541,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
@@ -27586,7 +27574,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
@@ -31014,7 +31002,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:296
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:292
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:288
@@ -31047,7 +31035,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:172
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:168
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:164
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:160
@@ -31080,7 +31068,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:44
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:40
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:36
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:32
@@ -31173,7 +31161,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, v135 :: v_dual_mov_b32 v16, v152
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v17, v170
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v186 :: v_dual_mov_b32 v19, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v185, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v184, off, s32 offset:8
@@ -31206,7 +31194,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:136
@@ -31239,7 +31227,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xa
+; GFX11-TRUE16-NEXT: s_clause 0xa ; 44-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:264
@@ -31389,6 +31377,17 @@ define <40 x half> @bitcast_v40i16_to_v40f16(<40 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v40i16_to_v40f16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
@@ -31405,17 +31404,6 @@ define <40 x half> @bitcast_v40i16_to_v40f16(<40 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:8
; SI-NEXT: ; implicit-def: $vgpr40
; SI-NEXT: ; kill: killed $vgpr40
; SI-NEXT: ; implicit-def: $vgpr40
@@ -31472,7 +31460,7 @@ define <40 x half> @bitcast_v40i16_to_v40f16(<40 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr42
; SI-NEXT: ; kill: killed $vgpr40
; SI-NEXT: ; implicit-def: $vgpr40
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -31523,7 +31511,6 @@ define <40 x half> @bitcast_v40i16_to_v40f16(<40 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v30
-; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_cvt_f32_f16_e32 v40, v48
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
@@ -31623,7 +31610,6 @@ define <40 x half> @bitcast_v40i16_to_v40f16(<40 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v27
; SI-NEXT: v_add_i32_e32 v39, vcc, 3, v39
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_add_i32_e32 v49, vcc, 3, v49
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -31643,7 +31629,6 @@ define <40 x half> @bitcast_v40i16_to_v40f16(<40 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v38
-; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_add_i32_e32 v48, vcc, 3, v48
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
index 87d5157..ed407c1 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
@@ -3792,6 +3792,17 @@ define <22 x i32> @bitcast_v44i16_to_v22i32(<44 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v44i16_to_v22i32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v39, v16
+; SI-NEXT: v_mov_b32_e32 v48, v14
+; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
@@ -3814,17 +3825,6 @@ define <22 x i32> @bitcast_v44i16_to_v22i32(<44 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v39, v16
-; SI-NEXT: v_mov_b32_e32 v48, v14
-; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: v_mov_b32_e32 v38, v18
; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v3
@@ -3842,9 +3842,8 @@ define <22 x i32> @bitcast_v44i16_to_v22i32(<44 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8
@@ -5329,7 +5328,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
@@ -5362,7 +5361,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
@@ -5395,7 +5394,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
@@ -5496,7 +5495,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
@@ -5529,7 +5528,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
@@ -5562,7 +5561,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
@@ -9311,7 +9310,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
@@ -9344,7 +9343,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
@@ -9377,7 +9376,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
@@ -9478,7 +9477,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
@@ -9511,7 +9510,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
@@ -9544,7 +9543,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
@@ -12755,6 +12754,17 @@ define <22 x float> @bitcast_v44i16_to_v22f32(<44 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v44i16_to_v22f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v39, v16
+; SI-NEXT: v_mov_b32_e32 v48, v14
+; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
@@ -12777,17 +12787,6 @@ define <22 x float> @bitcast_v44i16_to_v22f32(<44 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v39, v16
-; SI-NEXT: v_mov_b32_e32 v48, v14
-; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: v_mov_b32_e32 v38, v18
; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v3
@@ -12805,9 +12804,8 @@ define <22 x float> @bitcast_v44i16_to_v22f32(<44 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8
@@ -14292,7 +14290,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
@@ -14325,7 +14323,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
@@ -14358,7 +14356,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
@@ -14459,7 +14457,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
@@ -14492,7 +14490,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
@@ -14525,7 +14523,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
@@ -18407,7 +18405,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
@@ -18440,7 +18438,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
@@ -18473,7 +18471,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
@@ -18574,7 +18572,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
@@ -18607,7 +18605,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
@@ -18640,7 +18638,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
@@ -21004,6 +21002,17 @@ define <11 x i64> @bitcast_v44i16_to_v11i64(<44 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v44i16_to_v11i64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v39, v16
+; SI-NEXT: v_mov_b32_e32 v48, v14
+; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
@@ -21026,17 +21035,6 @@ define <11 x i64> @bitcast_v44i16_to_v11i64(<44 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v39, v16
-; SI-NEXT: v_mov_b32_e32 v48, v14
-; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: v_mov_b32_e32 v38, v18
; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v3
@@ -21054,9 +21052,8 @@ define <11 x i64> @bitcast_v44i16_to_v11i64(<44 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8
@@ -22541,7 +22538,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
@@ -22574,7 +22571,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
@@ -22607,7 +22604,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
@@ -22708,7 +22705,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
@@ -22741,7 +22738,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
@@ -22774,7 +22771,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
@@ -26535,7 +26532,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
@@ -26568,7 +26565,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
@@ -26601,7 +26598,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
@@ -26702,7 +26699,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
@@ -26735,7 +26732,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
@@ -26768,7 +26765,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
@@ -28420,6 +28417,17 @@ define <11 x double> @bitcast_v44i16_to_v11f64(<44 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v44i16_to_v11f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v39, v16
+; SI-NEXT: v_mov_b32_e32 v48, v14
+; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
@@ -28442,17 +28450,6 @@ define <11 x double> @bitcast_v44i16_to_v11f64(<44 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v39, v16
-; SI-NEXT: v_mov_b32_e32 v48, v14
-; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: v_mov_b32_e32 v38, v18
; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v3
@@ -28470,9 +28467,8 @@ define <11 x double> @bitcast_v44i16_to_v11f64(<44 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8
@@ -29957,7 +29953,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
@@ -29990,7 +29986,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
@@ -30023,7 +30019,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
@@ -30124,7 +30120,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
@@ -30157,7 +30153,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
@@ -30190,7 +30186,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
@@ -33996,7 +33992,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:304
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:300
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:296
@@ -34029,7 +34025,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:180
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:176
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:172
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:168
@@ -34062,7 +34058,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:52
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:48
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:44
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:40
@@ -34163,7 +34159,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, v170 :: v_dual_mov_b32 v18, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v187 :: v_dual_mov_b32 v20, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v21, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v187, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v186, off, s32 offset:8
@@ -34196,7 +34192,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:136
@@ -34229,7 +34225,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xc
+; GFX11-TRUE16-NEXT: s_clause 0xc ; 52-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:264
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
index fb2e94f..9ec3f5c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
@@ -4045,6 +4045,22 @@ define <24 x i32> @bitcast_v48i16_to_v24i32(<48 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v48i16_to_v24i32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v48, v14
+; SI-NEXT: v_mov_b32_e32 v49, v12
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
@@ -4069,22 +4085,6 @@ define <24 x i32> @bitcast_v48i16_to_v24i32(<48 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v48, v14
-; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:60
; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v5
@@ -4100,21 +4100,14 @@ define <24 x i32> @bitcast_v48i16_to_v24i32(<48 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v12
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56
@@ -5806,7 +5799,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
@@ -5839,7 +5832,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
@@ -5872,7 +5865,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
@@ -5979,7 +5972,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
@@ -6012,7 +6005,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
@@ -6045,7 +6038,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
@@ -8179,6 +8172,8 @@ define <24 x i32> @bitcast_v48f16_to_v24i32(<48 x half> %a, i32 %b) {
; SI-LABEL: bitcast_v48f16_to_v24i32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
@@ -8195,8 +8190,6 @@ define <24 x i32> @bitcast_v48f16_to_v24i32(<48 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4
@@ -8223,34 +8216,34 @@ define <24 x i32> @bitcast_v48f16_to_v24i32(<48 x half> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; SI-NEXT: v_cvt_f16_f32_e32 v55, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v3
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v17
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v3
; SI-NEXT: v_cvt_f16_f32_e32 v52, v2
; SI-NEXT: v_cvt_f16_f32_e32 v51, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v50, v4
; SI-NEXT: v_cvt_f16_f32_e32 v49, v7
; SI-NEXT: v_cvt_f16_f32_e32 v48, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v39, v9
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v39, v9
; SI-NEXT: v_cvt_f16_f32_e32 v38, v8
; SI-NEXT: v_cvt_f16_f32_e32 v37, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v10
; SI-NEXT: v_cvt_f16_f32_e32 v35, v13
; SI-NEXT: v_cvt_f16_f32_e32 v34, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v33, v15
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v15
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v20
@@ -10214,7 +10207,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
@@ -10247,7 +10240,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
@@ -10280,7 +10273,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
@@ -10387,7 +10380,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
@@ -10420,7 +10413,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
@@ -10453,7 +10446,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
@@ -13882,6 +13875,22 @@ define <24 x float> @bitcast_v48i16_to_v24f32(<48 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v48i16_to_v24f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v48, v14
+; SI-NEXT: v_mov_b32_e32 v49, v12
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
@@ -13906,22 +13915,6 @@ define <24 x float> @bitcast_v48i16_to_v24f32(<48 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v48, v14
-; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:60
; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v5
@@ -13937,21 +13930,14 @@ define <24 x float> @bitcast_v48i16_to_v24f32(<48 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v12
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56
@@ -15643,7 +15629,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
@@ -15676,7 +15662,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
@@ -15709,7 +15695,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
@@ -15816,7 +15802,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
@@ -15849,7 +15835,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
@@ -15882,7 +15868,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
@@ -18157,6 +18143,8 @@ define <24 x float> @bitcast_v48f16_to_v24f32(<48 x half> %a, i32 %b) {
; SI-LABEL: bitcast_v48f16_to_v24f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
@@ -18173,8 +18161,6 @@ define <24 x float> @bitcast_v48f16_to_v24f32(<48 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4
@@ -18201,34 +18187,34 @@ define <24 x float> @bitcast_v48f16_to_v24f32(<48 x half> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; SI-NEXT: v_cvt_f16_f32_e32 v55, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v3
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v17
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v3
; SI-NEXT: v_cvt_f16_f32_e32 v52, v2
; SI-NEXT: v_cvt_f16_f32_e32 v51, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v50, v4
; SI-NEXT: v_cvt_f16_f32_e32 v49, v7
; SI-NEXT: v_cvt_f16_f32_e32 v48, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v39, v9
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v39, v9
; SI-NEXT: v_cvt_f16_f32_e32 v38, v8
; SI-NEXT: v_cvt_f16_f32_e32 v37, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v10
; SI-NEXT: v_cvt_f16_f32_e32 v35, v13
; SI-NEXT: v_cvt_f16_f32_e32 v34, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v33, v15
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v15
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v20
@@ -20192,7 +20178,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
@@ -20225,7 +20211,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
@@ -20258,7 +20244,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
@@ -20365,7 +20351,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
@@ -20398,7 +20384,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
@@ -20431,7 +20417,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
@@ -22982,6 +22968,22 @@ define <12 x i64> @bitcast_v48i16_to_v12i64(<48 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v48i16_to_v12i64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v48, v14
+; SI-NEXT: v_mov_b32_e32 v49, v12
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
@@ -23006,22 +23008,6 @@ define <12 x i64> @bitcast_v48i16_to_v12i64(<48 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v48, v14
-; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:60
; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v5
@@ -23037,21 +23023,14 @@ define <12 x i64> @bitcast_v48i16_to_v12i64(<48 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v12
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56
@@ -24743,7 +24722,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
@@ -24776,7 +24755,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
@@ -24809,7 +24788,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
@@ -24916,7 +24895,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
@@ -24949,7 +24928,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
@@ -24982,7 +24961,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
@@ -27128,6 +27107,8 @@ define <12 x i64> @bitcast_v48f16_to_v12i64(<48 x half> %a, i32 %b) {
; SI-LABEL: bitcast_v48f16_to_v12i64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
@@ -27144,8 +27125,6 @@ define <12 x i64> @bitcast_v48f16_to_v12i64(<48 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4
@@ -27172,34 +27151,34 @@ define <12 x i64> @bitcast_v48f16_to_v12i64(<48 x half> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; SI-NEXT: v_cvt_f16_f32_e32 v55, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v3
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v17
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v3
; SI-NEXT: v_cvt_f16_f32_e32 v52, v2
; SI-NEXT: v_cvt_f16_f32_e32 v51, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v50, v4
; SI-NEXT: v_cvt_f16_f32_e32 v49, v7
; SI-NEXT: v_cvt_f16_f32_e32 v48, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v39, v9
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v39, v9
; SI-NEXT: v_cvt_f16_f32_e32 v38, v8
; SI-NEXT: v_cvt_f16_f32_e32 v37, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v10
; SI-NEXT: v_cvt_f16_f32_e32 v35, v13
; SI-NEXT: v_cvt_f16_f32_e32 v34, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v33, v15
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v15
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v20
@@ -29163,7 +29142,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
@@ -29196,7 +29175,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
@@ -29229,7 +29208,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
@@ -29336,7 +29315,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
@@ -29369,7 +29348,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
@@ -29402,7 +29381,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
@@ -31199,6 +31178,22 @@ define <12 x double> @bitcast_v48i16_to_v12f64(<48 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v48i16_to_v12f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v48, v14
+; SI-NEXT: v_mov_b32_e32 v49, v12
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
@@ -31223,22 +31218,6 @@ define <12 x double> @bitcast_v48i16_to_v12f64(<48 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v48, v14
-; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:60
; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v5
@@ -31254,21 +31233,14 @@ define <12 x double> @bitcast_v48i16_to_v12f64(<48 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v12
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56
@@ -32960,7 +32932,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
@@ -32993,7 +32965,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
@@ -33026,7 +32998,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
@@ -33133,7 +33105,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
@@ -33166,7 +33138,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
@@ -33199,7 +33171,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
@@ -35392,6 +35364,8 @@ define <12 x double> @bitcast_v48f16_to_v12f64(<48 x half> %a, i32 %b) {
; SI-LABEL: bitcast_v48f16_to_v12f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
@@ -35408,8 +35382,6 @@ define <12 x double> @bitcast_v48f16_to_v12f64(<48 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4
@@ -35436,34 +35408,34 @@ define <12 x double> @bitcast_v48f16_to_v12f64(<48 x half> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
; SI-NEXT: v_cvt_f16_f32_e32 v55, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v3
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v17
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v3
; SI-NEXT: v_cvt_f16_f32_e32 v52, v2
; SI-NEXT: v_cvt_f16_f32_e32 v51, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v50, v4
; SI-NEXT: v_cvt_f16_f32_e32 v49, v7
; SI-NEXT: v_cvt_f16_f32_e32 v48, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v39, v9
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v39, v9
; SI-NEXT: v_cvt_f16_f32_e32 v38, v8
; SI-NEXT: v_cvt_f16_f32_e32 v37, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v10
; SI-NEXT: v_cvt_f16_f32_e32 v35, v13
; SI-NEXT: v_cvt_f16_f32_e32 v34, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v33, v15
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v15
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v20
@@ -37427,7 +37399,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:308
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:304
@@ -37460,7 +37432,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:192
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:180
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:176
@@ -37493,7 +37465,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:64
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:60
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:52
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:48
@@ -37600,7 +37572,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v18, v190 :: v_dual_mov_b32 v19, v189
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v187 :: v_dual_mov_b32 v22, v186
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v23, v185
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v188, off, s32 offset:8
@@ -37633,7 +37605,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:136
@@ -37666,7 +37638,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xe
+; GFX11-TRUE16-NEXT: s_clause 0xe ; 60-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:264
@@ -41255,6 +41227,11 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; SI-LABEL: bitcast_v48f16_to_v48i16_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
@@ -41271,11 +41248,6 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_cvt_f16_f32_e32 v61, v2
; SI-NEXT: v_cvt_f16_f32_e32 v55, v3
@@ -41320,16 +41292,12 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v50, s25
; SI-NEXT: v_cvt_f16_f32_e32 v16, s26
; SI-NEXT: v_cvt_f16_f32_e32 v29, s29
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f16_f32_e32 v31, v32
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f16_f32_e32 v43, v33
; SI-NEXT: v_cvt_f16_f32_e32 v32, v20
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f16_f32_e32 v25, v35
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v2, v37
; SI-NEXT: v_cvt_f16_f32_e32 v20, s22
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
index 07cdbef..c7a1993 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
@@ -4341,6 +4341,19 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v52i16_to_v26i32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:12
+; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
@@ -4366,19 +4379,6 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:48
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:12
-; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v5
@@ -4394,17 +4394,12 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:8
@@ -4429,9 +4424,10 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:68
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v18
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:64
@@ -4443,10 +4439,9 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:56
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v22
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v22
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:44
@@ -5032,7 +5027,6 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v57, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v24
@@ -5099,6 +5093,7 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 16, v56
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v57
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -5231,6 +5226,9 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB14_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
@@ -5245,9 +5243,6 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v34, v57, s6
; GFX9-NEXT: v_perm_b32 v1, v62, v56, s6
; GFX9-NEXT: v_perm_b32 v2, v33, v47, s6
@@ -5266,6 +5261,10 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v40, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -5294,10 +5293,6 @@ define <26 x i32> @bitcast_v52i16_to_v26i32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v36, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v35, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -6287,7 +6282,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -6320,7 +6315,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -6353,7 +6348,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -6465,7 +6460,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -6498,7 +6493,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -6531,7 +6526,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -9760,7 +9755,6 @@ define <26 x i32> @bitcast_v52f16_to_v26i32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v57, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v24
@@ -9827,6 +9821,7 @@ define <26 x i32> @bitcast_v52f16_to_v26i32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 16, v56
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v57
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -9959,6 +9954,9 @@ define <26 x i32> @bitcast_v52f16_to_v26i32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB18_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
@@ -9973,9 +9971,6 @@ define <26 x i32> @bitcast_v52f16_to_v26i32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v34, v57, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v62, v56, s6
@@ -9995,6 +9990,10 @@ define <26 x i32> @bitcast_v52f16_to_v26i32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v40, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -10023,10 +10022,6 @@ define <26 x i32> @bitcast_v52f16_to_v26i32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v36, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v35, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -10295,14 +10290,28 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
-; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v39
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
@@ -10318,22 +10327,6 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
@@ -10342,8 +10335,8 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12
@@ -10363,10 +10356,8 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v42
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v62
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36
@@ -10407,11 +10398,11 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v25, v38, v25
; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v53
; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
; SI-NEXT: v_cvt_f32_f16_e32 v10, v55
@@ -10425,7 +10416,6 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
; SI-NEXT: v_cvt_f32_f16_e32 v15, v52
@@ -10463,7 +10453,6 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
@@ -11113,7 +11102,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -11146,7 +11135,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -11179,7 +11168,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -11291,7 +11280,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -11324,7 +11313,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -11357,7 +11346,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -15076,6 +15065,19 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v52i16_to_v26f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:12
+; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
@@ -15101,19 +15103,6 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:48
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:12
-; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v5
@@ -15129,17 +15118,12 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:8
@@ -15164,9 +15148,10 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:68
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v18
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:64
@@ -15178,10 +15163,9 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:56
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v22
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v22
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:44
@@ -15767,7 +15751,6 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v57, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v24
@@ -15834,6 +15817,7 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 16, v56
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v57
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -15966,6 +15950,9 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB30_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
@@ -15980,9 +15967,6 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v34, v57, s6
; GFX9-NEXT: v_perm_b32 v1, v62, v56, s6
; GFX9-NEXT: v_perm_b32 v2, v33, v47, s6
@@ -16001,6 +15985,10 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v40, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -16029,10 +16017,6 @@ define <26 x float> @bitcast_v52i16_to_v26f32(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v36, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v35, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -17022,7 +17006,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -17055,7 +17039,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -17088,7 +17072,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -17200,7 +17184,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -17233,7 +17217,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -17266,7 +17250,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -20653,7 +20637,6 @@ define <26 x float> @bitcast_v52f16_to_v26f32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v57, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v24
@@ -20720,6 +20703,7 @@ define <26 x float> @bitcast_v52f16_to_v26f32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 16, v56
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v57
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -20852,6 +20836,9 @@ define <26 x float> @bitcast_v52f16_to_v26f32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB34_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
@@ -20866,9 +20853,6 @@ define <26 x float> @bitcast_v52f16_to_v26f32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v34, v57, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v62, v56, s6
@@ -20888,6 +20872,10 @@ define <26 x float> @bitcast_v52f16_to_v26f32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v40, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -20916,10 +20904,6 @@ define <26 x float> @bitcast_v52f16_to_v26f32(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v36, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v35, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -21188,14 +21172,28 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
-; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v39
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
@@ -21211,22 +21209,6 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
@@ -21235,8 +21217,8 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12
@@ -21256,10 +21238,8 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v42
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v62
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36
@@ -21300,11 +21280,11 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v25, v38, v25
; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v53
; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
; SI-NEXT: v_cvt_f32_f16_e32 v10, v55
@@ -21318,7 +21298,6 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
; SI-NEXT: v_cvt_f32_f16_e32 v15, v52
@@ -21356,7 +21335,6 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
@@ -22006,7 +21984,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -22039,7 +22017,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -22072,7 +22050,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -22184,7 +22162,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -22217,7 +22195,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -22250,7 +22228,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -25023,6 +25001,19 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v52i16_to_v13i64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:12
+; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
@@ -25048,19 +25039,6 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:48
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:12
-; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v5
@@ -25076,17 +25054,12 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:8
@@ -25111,9 +25084,10 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:68
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v18
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:64
@@ -25125,10 +25099,9 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:56
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v22
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v22
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:44
@@ -25714,7 +25687,6 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v57, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v24
@@ -25781,6 +25753,7 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 16, v56
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v57
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -25913,6 +25886,9 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB42_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
@@ -25927,9 +25903,6 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v34, v57, s6
; GFX9-NEXT: v_perm_b32 v1, v62, v56, s6
; GFX9-NEXT: v_perm_b32 v2, v33, v47, s6
@@ -25948,6 +25921,10 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v40, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -25976,10 +25953,6 @@ define <13 x i64> @bitcast_v52i16_to_v13i64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v36, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v35, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -26969,7 +26942,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -27002,7 +26975,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -27035,7 +27008,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -27147,7 +27120,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -27180,7 +27153,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -27213,7 +27186,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -30457,7 +30430,6 @@ define <13 x i64> @bitcast_v52f16_to_v13i64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v57, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v24
@@ -30524,6 +30496,7 @@ define <13 x i64> @bitcast_v52f16_to_v13i64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 16, v56
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v57
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -30656,6 +30629,9 @@ define <13 x i64> @bitcast_v52f16_to_v13i64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB46_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
@@ -30670,9 +30646,6 @@ define <13 x i64> @bitcast_v52f16_to_v13i64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v34, v57, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v62, v56, s6
@@ -30692,6 +30665,10 @@ define <13 x i64> @bitcast_v52f16_to_v13i64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v40, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -30720,10 +30697,6 @@ define <13 x i64> @bitcast_v52f16_to_v13i64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v36, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v35, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -30992,14 +30965,28 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
-; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v39
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
@@ -31015,22 +31002,6 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
@@ -31039,8 +31010,8 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12
@@ -31060,10 +31031,8 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v42
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v62
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36
@@ -31104,11 +31073,11 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v25, v38, v25
; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v53
; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
; SI-NEXT: v_cvt_f32_f16_e32 v10, v55
@@ -31122,7 +31091,6 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
; SI-NEXT: v_cvt_f32_f16_e32 v15, v52
@@ -31160,7 +31128,6 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
@@ -31810,7 +31777,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -31843,7 +31810,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -31876,7 +31843,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -31988,7 +31955,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -32021,7 +31988,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -32054,7 +32021,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -34053,6 +34020,19 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v52i16_to_v13f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v50, v10
+; SI-NEXT: v_mov_b32_e32 v51, v8
+; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: v_mov_b32_e32 v53, v4
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:12
+; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
@@ -34078,19 +34058,6 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v50, v10
-; SI-NEXT: v_mov_b32_e32 v51, v8
-; SI-NEXT: v_mov_b32_e32 v52, v6
-; SI-NEXT: v_mov_b32_e32 v53, v4
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:48
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:12
-; SI-NEXT: v_mov_b32_e32 v49, v12
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v5
@@ -34106,17 +34073,12 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v29
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:8
@@ -34141,9 +34103,10 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:68
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v18
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:64
@@ -34155,10 +34118,9 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:56
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v22
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v22
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:44
@@ -34744,7 +34706,6 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v57, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v24
@@ -34811,6 +34772,7 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 16, v56
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v57
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -34943,6 +34905,9 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB50_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
@@ -34957,9 +34922,6 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v34, v57, s6
; GFX9-NEXT: v_perm_b32 v1, v62, v56, s6
; GFX9-NEXT: v_perm_b32 v2, v33, v47, s6
@@ -34978,6 +34940,10 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v40, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -35006,10 +34972,6 @@ define <13 x double> @bitcast_v52i16_to_v13f64(<52 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v36, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v35, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -35999,7 +35961,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -36032,7 +35994,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -36065,7 +36027,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -36177,7 +36139,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -36210,7 +36172,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -36243,7 +36205,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -39539,7 +39501,6 @@ define <13 x double> @bitcast_v52f16_to_v13f64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v57, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v24
@@ -39606,6 +39567,7 @@ define <13 x double> @bitcast_v52f16_to_v13f64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v62, 16, v56
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v57
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -39738,6 +39700,9 @@ define <13 x double> @bitcast_v52f16_to_v13f64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB54_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
@@ -39752,9 +39717,6 @@ define <13 x double> @bitcast_v52f16_to_v13f64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v34, v57, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v62, v56, s6
@@ -39774,6 +39736,10 @@ define <13 x double> @bitcast_v52f16_to_v13f64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v40, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -39802,10 +39768,6 @@ define <13 x double> @bitcast_v52f16_to_v13f64(<52 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v36, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v35, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -40074,14 +40036,28 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
-; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v39
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
@@ -40097,22 +40073,6 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
@@ -40121,8 +40081,8 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12
@@ -40142,10 +40102,8 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v42
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v62
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36
@@ -40186,11 +40144,11 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v25, v38, v25
; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v53
; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
; SI-NEXT: v_cvt_f32_f16_e32 v10, v55
@@ -40204,7 +40162,6 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
; SI-NEXT: v_cvt_f32_f16_e32 v15, v52
@@ -40242,7 +40199,6 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
@@ -40892,7 +40848,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -40925,7 +40881,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -40958,7 +40914,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -41070,7 +41026,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v191 :: v_dual_mov_b32 v20, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v189 :: v_dual_mov_b32 v22, v188
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v187 :: v_dual_mov_b32 v24, v186
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -41103,7 +41059,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -41136,7 +41092,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -45248,6 +45204,15 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; SI-LABEL: bitcast_v52f16_to_v52i16_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
@@ -45264,15 +45229,6 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:24
; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_cvt_f16_f32_e32 v58, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v3
@@ -45317,26 +45273,19 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v41, s21
; SI-NEXT: v_cvt_f16_f32_e32 v16, s26
; SI-NEXT: v_cvt_f16_f32_e32 v54, s29
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_cvt_f16_f32_e32 v53, v32
-; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cvt_f16_f32_e32 v32, v33
-; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_cvt_f16_f32_e32 v34, v34
-; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_cvt_f16_f32_e32 v30, v35
; SI-NEXT: v_cvt_f16_f32_e32 v35, v20
; SI-NEXT: v_cvt_f16_f32_e32 v33, v24
; SI-NEXT: v_cvt_f16_f32_e32 v31, v28
-; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cvt_f16_f32_e32 v55, v36
-; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_cvt_f16_f32_e32 v4, v38
-; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f16_f32_e32 v27, v39
-; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v2, v49
; SI-NEXT: v_cvt_f16_f32_e32 v24, s18
; SI-NEXT: v_cvt_f16_f32_e32 v20, s22
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
index 8eb71e9..77df03d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
@@ -4665,6 +4665,11 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v56i16_to_v28i32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92
+; SI-NEXT: v_mov_b32_e32 v53, v4
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
@@ -4694,11 +4699,6 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92
-; SI-NEXT: v_mov_b32_e32 v53, v4
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v5
@@ -4715,9 +4715,8 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v29
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88
@@ -5413,7 +5412,6 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v59, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v26
@@ -5486,6 +5484,7 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v58
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v59
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v28
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -5634,6 +5633,9 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB14_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
@@ -5648,9 +5650,6 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v36, v59, s6
; GFX9-NEXT: v_perm_b32 v1, v35, v58, s6
; GFX9-NEXT: v_perm_b32 v2, v62, v57, s6
@@ -5669,6 +5668,10 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v42, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -5697,10 +5700,6 @@ define <28 x i32> @bitcast_v56i16_to_v28i32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v38, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v37, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -6780,7 +6779,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -6813,7 +6812,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -6846,7 +6845,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -6960,7 +6959,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -6993,7 +6992,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -7026,7 +7025,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -10560,7 +10559,6 @@ define <28 x i32> @bitcast_v56f16_to_v28i32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v59, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v26
@@ -10633,6 +10631,7 @@ define <28 x i32> @bitcast_v56f16_to_v28i32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v58
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v59
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v28
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -10781,6 +10780,9 @@ define <28 x i32> @bitcast_v56f16_to_v28i32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB18_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
@@ -10795,9 +10797,6 @@ define <28 x i32> @bitcast_v56f16_to_v28i32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v36, v59, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v35, v58, s6
@@ -10817,6 +10816,10 @@ define <28 x i32> @bitcast_v56f16_to_v28i32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v42, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -10845,10 +10848,6 @@ define <28 x i32> @bitcast_v56f16_to_v28i32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v38, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v37, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -11148,7 +11147,20 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -11156,7 +11168,6 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v61
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
@@ -11188,19 +11199,6 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
@@ -11217,11 +11215,11 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_mov_b32_e32 v48, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_mov_b32_e32 v61, v44
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54
; SI-NEXT: v_mov_b32_e32 v39, v11
@@ -11299,6 +11297,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v27, v50, v27
; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
; SI-NEXT: v_cvt_f32_f16_e32 v2, v54
; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
@@ -11317,7 +11316,6 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v1, v3, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v49
; SI-NEXT: v_cvt_f32_f16_e32 v3, v39
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v33
; SI-NEXT: v_cvt_f32_f16_e32 v8, v47
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
@@ -11585,7 +11583,6 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
@@ -12044,7 +12041,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -12077,7 +12074,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -12110,7 +12107,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -12224,7 +12221,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -12257,7 +12254,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -12290,7 +12287,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -16290,6 +16287,11 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v56i16_to_v28f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92
+; SI-NEXT: v_mov_b32_e32 v53, v4
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
@@ -16319,11 +16321,6 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92
-; SI-NEXT: v_mov_b32_e32 v53, v4
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v5
@@ -16340,9 +16337,8 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v29
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88
@@ -17038,7 +17034,6 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v59, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v26
@@ -17111,6 +17106,7 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v58
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v59
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v28
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -17259,6 +17255,9 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB30_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
@@ -17273,9 +17272,6 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v36, v59, s6
; GFX9-NEXT: v_perm_b32 v1, v35, v58, s6
; GFX9-NEXT: v_perm_b32 v2, v62, v57, s6
@@ -17294,6 +17290,10 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v42, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -17322,10 +17322,6 @@ define <28 x float> @bitcast_v56i16_to_v28f32(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v38, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v37, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -18405,7 +18401,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -18438,7 +18434,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -18471,7 +18467,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -18585,7 +18581,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -18618,7 +18614,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -18651,7 +18647,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -22343,7 +22339,6 @@ define <28 x float> @bitcast_v56f16_to_v28f32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v59, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v26
@@ -22416,6 +22411,7 @@ define <28 x float> @bitcast_v56f16_to_v28f32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v58
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v59
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v28
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -22564,6 +22560,9 @@ define <28 x float> @bitcast_v56f16_to_v28f32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB34_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
@@ -22578,9 +22577,6 @@ define <28 x float> @bitcast_v56f16_to_v28f32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v36, v59, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v35, v58, s6
@@ -22600,6 +22596,10 @@ define <28 x float> @bitcast_v56f16_to_v28f32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v42, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -22628,10 +22628,6 @@ define <28 x float> @bitcast_v56f16_to_v28f32(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v38, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v37, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -22931,7 +22927,20 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -22939,7 +22948,6 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v61
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
@@ -22971,19 +22979,6 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
@@ -23000,11 +22995,11 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_mov_b32_e32 v48, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_mov_b32_e32 v61, v44
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54
; SI-NEXT: v_mov_b32_e32 v39, v11
@@ -23082,6 +23077,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v27, v50, v27
; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
; SI-NEXT: v_cvt_f32_f16_e32 v2, v54
; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
@@ -23100,7 +23096,6 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v3, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v49
; SI-NEXT: v_cvt_f32_f16_e32 v3, v39
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v33
; SI-NEXT: v_cvt_f32_f16_e32 v8, v47
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
@@ -23368,7 +23363,6 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
@@ -23827,7 +23821,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -23860,7 +23854,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -23893,7 +23887,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -24007,7 +24001,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -24040,7 +24034,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -24073,7 +24067,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -27080,6 +27074,11 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v56i16_to_v14i64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92
+; SI-NEXT: v_mov_b32_e32 v53, v4
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
@@ -27109,11 +27108,6 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92
-; SI-NEXT: v_mov_b32_e32 v53, v4
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v5
@@ -27130,9 +27124,8 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v29
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88
@@ -27828,7 +27821,6 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v59, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v26
@@ -27901,6 +27893,7 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v58
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v59
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v28
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -28049,6 +28042,9 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB42_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
@@ -28063,9 +28059,6 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v36, v59, s6
; GFX9-NEXT: v_perm_b32 v1, v35, v58, s6
; GFX9-NEXT: v_perm_b32 v2, v62, v57, s6
@@ -28084,6 +28077,10 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v42, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -28112,10 +28109,6 @@ define <14 x i64> @bitcast_v56i16_to_v14i64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v38, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v37, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -29195,7 +29188,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -29228,7 +29221,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -29261,7 +29254,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -29375,7 +29368,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -29408,7 +29401,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -29441,7 +29434,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -32989,7 +32982,6 @@ define <14 x i64> @bitcast_v56f16_to_v14i64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v59, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v26
@@ -33062,6 +33054,7 @@ define <14 x i64> @bitcast_v56f16_to_v14i64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v58
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v59
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v28
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -33210,6 +33203,9 @@ define <14 x i64> @bitcast_v56f16_to_v14i64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB46_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
@@ -33224,9 +33220,6 @@ define <14 x i64> @bitcast_v56f16_to_v14i64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v36, v59, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v35, v58, s6
@@ -33246,6 +33239,10 @@ define <14 x i64> @bitcast_v56f16_to_v14i64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v42, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -33274,10 +33271,6 @@ define <14 x i64> @bitcast_v56f16_to_v14i64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v38, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v37, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -33577,7 +33570,20 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -33585,7 +33591,6 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v61
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
@@ -33617,19 +33622,6 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
@@ -33646,11 +33638,11 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_mov_b32_e32 v48, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_mov_b32_e32 v61, v44
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54
; SI-NEXT: v_mov_b32_e32 v39, v11
@@ -33728,6 +33720,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v27, v50, v27
; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
; SI-NEXT: v_cvt_f32_f16_e32 v2, v54
; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
@@ -33746,7 +33739,6 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v1, v3, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v49
; SI-NEXT: v_cvt_f32_f16_e32 v3, v39
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v33
; SI-NEXT: v_cvt_f32_f16_e32 v8, v47
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
@@ -34014,7 +34006,6 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
@@ -34473,7 +34464,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -34506,7 +34497,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -34539,7 +34530,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -34653,7 +34644,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -34686,7 +34677,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -34719,7 +34710,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -36898,6 +36889,11 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; SI-LABEL: bitcast_v56i16_to_v14f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v54, v2
+; SI-NEXT: v_mov_b32_e32 v55, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92
+; SI-NEXT: v_mov_b32_e32 v53, v4
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
@@ -36927,11 +36923,6 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v54, v2
-; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92
-; SI-NEXT: v_mov_b32_e32 v53, v4
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v5
@@ -36948,9 +36939,8 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v29
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88
@@ -37646,7 +37636,6 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v59, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v26
@@ -37719,6 +37708,7 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v58
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v59
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v28
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -37867,6 +37857,9 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB50_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
@@ -37881,9 +37874,6 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v36, v59, s6
; GFX9-NEXT: v_perm_b32 v1, v35, v58, s6
; GFX9-NEXT: v_perm_b32 v2, v62, v57, s6
@@ -37902,6 +37892,10 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v42, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -37930,10 +37924,6 @@ define <14 x double> @bitcast_v56i16_to_v14f64(<56 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v38, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v37, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -39013,7 +39003,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -39046,7 +39036,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -39079,7 +39069,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -39193,7 +39183,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -39226,7 +39216,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -39259,7 +39249,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -42860,7 +42850,6 @@ define <14 x double> @bitcast_v56f16_to_v14f64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v59, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v26
@@ -42933,6 +42922,7 @@ define <14 x double> @bitcast_v56f16_to_v14f64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v58
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v59
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v28
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -43081,6 +43071,9 @@ define <14 x double> @bitcast_v56f16_to_v14f64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB54_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
@@ -43095,9 +43088,6 @@ define <14 x double> @bitcast_v56f16_to_v14f64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v36, v59, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v35, v58, s6
@@ -43117,6 +43107,10 @@ define <14 x double> @bitcast_v56f16_to_v14f64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v42, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -43145,10 +43139,6 @@ define <14 x double> @bitcast_v56f16_to_v14f64(<56 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v38, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v37, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -43448,7 +43438,20 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -43456,7 +43459,6 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v61
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
@@ -43488,19 +43490,6 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
@@ -43517,11 +43506,11 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_mov_b32_e32 v48, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_mov_b32_e32 v61, v44
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54
; SI-NEXT: v_mov_b32_e32 v39, v11
@@ -43599,6 +43588,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v27, v50, v27
; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
; SI-NEXT: v_cvt_f32_f16_e32 v2, v54
; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
@@ -43617,7 +43607,6 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v1, v3, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v49
; SI-NEXT: v_cvt_f32_f16_e32 v3, v39
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v33
; SI-NEXT: v_cvt_f32_f16_e32 v8, v47
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
@@ -43885,7 +43874,6 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
@@ -44344,7 +44332,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v10
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -44377,7 +44365,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -44410,7 +44398,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -44524,7 +44512,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v186 :: v_dual_mov_b32 v20, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v191 :: v_dual_mov_b32 v22, v190
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v189 :: v_dual_mov_b32 v24, v188
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -44557,7 +44545,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -44590,7 +44578,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
index 93c11f1..c9e5771 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
@@ -5032,40 +5032,53 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:4
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:108
+; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v4
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:104
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:100
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v10
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v6
+; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:96
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:92
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v10
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:84
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:48
@@ -5096,27 +5109,10 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:52
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v28
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v30
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v30
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -5201,7 +5197,6 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; kill: killed $vgpr30
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55
; SI-NEXT: v_and_b32_e32 v18, 0xffff, v61
; SI-NEXT: ; kill: killed $vgpr30
@@ -5346,7 +5341,6 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v61
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -5494,7 +5488,7 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: bitcast_v60i16_to_v30i32:
@@ -5776,7 +5770,6 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v61, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v28
@@ -5855,6 +5848,7 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v60
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v61
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -6019,6 +6013,9 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB14_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
@@ -6033,9 +6030,6 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v38, v61, s6
; GFX9-NEXT: v_perm_b32 v1, v37, v60, s6
; GFX9-NEXT: v_perm_b32 v2, v62, v59, s6
@@ -6054,6 +6048,10 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v44, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -6082,10 +6080,6 @@ define <30 x i32> @bitcast_v60i16_to_v30i32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v48, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v39, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -7241,7 +7235,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -7274,7 +7268,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -7307,7 +7301,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -7424,7 +7418,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -7457,7 +7451,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -7490,7 +7484,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -10345,6 +10339,9 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:116
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32
@@ -10373,23 +10370,12 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:88
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:84
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
; SI-NEXT: v_cvt_f16_f32_e32 v59, v1
; SI-NEXT: v_cvt_f16_f32_e32 v57, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v2
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v60
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v7
@@ -10399,8 +10385,6 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v9
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v8
@@ -10422,9 +10406,18 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v17
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v16
@@ -10434,6 +10427,7 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v21
@@ -10471,7 +10465,6 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v61
; SI-NEXT: v_cvt_f16_f32_e32 v61, v49
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v49, v55
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -10486,6 +10479,7 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v32
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v32, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -11357,7 +11351,6 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v61, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v28
@@ -11436,6 +11429,7 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v60
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v61
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -11600,6 +11594,9 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB18_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
@@ -11614,9 +11611,6 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v38, v61, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v37, v60, s6
@@ -11636,6 +11630,10 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v44, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -11664,10 +11662,6 @@ define <30 x i32> @bitcast_v60f16_to_v30i32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v48, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v39, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -11988,12 +11982,35 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: v_cvt_f16_f32_e32 v50, v54
; SI-NEXT: v_cvt_f16_f32_e32 v48, v48
; SI-NEXT: v_cvt_f16_f32_e32 v31, v40
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
@@ -12003,7 +12020,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v44
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -12012,7 +12029,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v46
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -12021,7 +12038,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v57
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v58
; SI-NEXT: v_cvt_f16_f32_e32 v58, s16
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
@@ -12032,38 +12049,12 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v34
; SI-NEXT: v_mov_b32_e32 v33, v32
; SI-NEXT: v_or_b32_e32 v10, v32, v10
@@ -12088,12 +12079,12 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
; SI-NEXT: v_or_b32_e32 v1, v12, v1
; SI-NEXT: v_or_b32_e32 v2, v11, v2
@@ -12202,12 +12193,10 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v40, v44
; SI-NEXT: s_cbranch_vccnz .LBB19_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: v_cvt_f32_f16_e32 v1, v58
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
@@ -12993,7 +12982,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -13026,7 +13015,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -13059,7 +13048,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -13176,7 +13165,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -13209,7 +13198,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -13242,7 +13231,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -17570,40 +17559,53 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:4
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:108
+; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v4
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:104
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:100
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v10
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v6
+; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:96
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:92
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v10
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:84
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:48
@@ -17634,27 +17636,10 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:52
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v28
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v30
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v30
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -17739,7 +17724,6 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; kill: killed $vgpr30
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55
; SI-NEXT: v_and_b32_e32 v18, 0xffff, v61
; SI-NEXT: ; kill: killed $vgpr30
@@ -17884,7 +17868,6 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v61
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -18032,7 +18015,7 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: bitcast_v60i16_to_v30f32:
@@ -18314,7 +18297,6 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v61, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v28
@@ -18393,6 +18375,7 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v60
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v61
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -18557,6 +18540,9 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB30_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
@@ -18571,9 +18557,6 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v38, v61, s6
; GFX9-NEXT: v_perm_b32 v1, v37, v60, s6
; GFX9-NEXT: v_perm_b32 v2, v62, v59, s6
@@ -18592,6 +18575,10 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v44, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -18620,10 +18607,6 @@ define <30 x float> @bitcast_v60i16_to_v30f32(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v48, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v39, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -19779,7 +19762,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -19812,7 +19795,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -19845,7 +19828,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -19962,7 +19945,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -19995,7 +19978,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -20028,7 +20011,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -23044,6 +23027,9 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:116
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32
@@ -23072,23 +23058,12 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:88
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:84
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
; SI-NEXT: v_cvt_f16_f32_e32 v59, v1
; SI-NEXT: v_cvt_f16_f32_e32 v57, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v2
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v60
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v7
@@ -23098,8 +23073,6 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v9
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v8
@@ -23121,9 +23094,18 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v17
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v16
@@ -23133,6 +23115,7 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v21
@@ -23170,7 +23153,6 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v61
; SI-NEXT: v_cvt_f16_f32_e32 v61, v49
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v49, v55
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -23185,6 +23167,7 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v32
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v32, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -24056,7 +24039,6 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v61, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v28
@@ -24135,6 +24117,7 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v60
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v61
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -24299,6 +24282,9 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB34_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
@@ -24313,9 +24299,6 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v38, v61, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v37, v60, s6
@@ -24335,6 +24318,10 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v44, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -24363,10 +24350,6 @@ define <30 x float> @bitcast_v60f16_to_v30f32(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v48, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v39, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -24687,12 +24670,35 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: v_cvt_f16_f32_e32 v50, v54
; SI-NEXT: v_cvt_f16_f32_e32 v48, v48
; SI-NEXT: v_cvt_f16_f32_e32 v31, v40
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
@@ -24702,7 +24708,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v44
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -24711,7 +24717,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v46
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -24720,7 +24726,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v57
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v58
; SI-NEXT: v_cvt_f16_f32_e32 v58, s16
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
@@ -24731,38 +24737,12 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v34
; SI-NEXT: v_mov_b32_e32 v33, v32
; SI-NEXT: v_or_b32_e32 v10, v32, v10
@@ -24787,12 +24767,12 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
; SI-NEXT: v_or_b32_e32 v1, v12, v1
; SI-NEXT: v_or_b32_e32 v2, v11, v2
@@ -24901,12 +24881,10 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v40, v44
; SI-NEXT: s_cbranch_vccnz .LBB35_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: v_cvt_f32_f16_e32 v1, v58
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
@@ -25692,7 +25670,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -25725,7 +25703,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -25758,7 +25736,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -25875,7 +25853,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -25908,7 +25886,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -25941,7 +25919,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -29240,40 +29218,53 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:4
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:108
+; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v4
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:104
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:100
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v10
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v6
+; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:96
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:92
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v10
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:84
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:48
@@ -29304,27 +29295,10 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:52
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v28
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v30
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v30
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -29409,7 +29383,6 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; kill: killed $vgpr30
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55
; SI-NEXT: v_and_b32_e32 v18, 0xffff, v61
; SI-NEXT: ; kill: killed $vgpr30
@@ -29554,7 +29527,6 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v61
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -29702,7 +29674,7 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: bitcast_v60i16_to_v15i64:
@@ -29984,7 +29956,6 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v61, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v28
@@ -30063,6 +30034,7 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v60
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v61
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -30227,6 +30199,9 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB42_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
@@ -30241,9 +30216,6 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v38, v61, s6
; GFX9-NEXT: v_perm_b32 v1, v37, v60, s6
; GFX9-NEXT: v_perm_b32 v2, v62, v59, s6
@@ -30262,6 +30234,10 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v44, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -30290,10 +30266,6 @@ define <15 x i64> @bitcast_v60i16_to_v15i64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v48, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v39, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -31449,7 +31421,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -31482,7 +31454,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -31515,7 +31487,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -31632,7 +31604,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -31665,7 +31637,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -31698,7 +31670,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -34570,6 +34542,9 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:116
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32
@@ -34598,23 +34573,12 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:88
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:84
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
; SI-NEXT: v_cvt_f16_f32_e32 v59, v1
; SI-NEXT: v_cvt_f16_f32_e32 v57, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v2
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v60
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v7
@@ -34624,8 +34588,6 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v9
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v8
@@ -34647,9 +34609,18 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v17
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v16
@@ -34659,6 +34630,7 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v21
@@ -34696,7 +34668,6 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v61
; SI-NEXT: v_cvt_f16_f32_e32 v61, v49
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v49, v55
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -34711,6 +34682,7 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v32
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v32, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -35582,7 +35554,6 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v61, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v28
@@ -35661,6 +35632,7 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v60
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v61
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -35825,6 +35797,9 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB46_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
@@ -35839,9 +35814,6 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v38, v61, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v37, v60, s6
@@ -35861,6 +35833,10 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v44, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -35889,10 +35865,6 @@ define <15 x i64> @bitcast_v60f16_to_v15i64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v48, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v39, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -36213,12 +36185,35 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: v_cvt_f16_f32_e32 v50, v54
; SI-NEXT: v_cvt_f16_f32_e32 v48, v48
; SI-NEXT: v_cvt_f16_f32_e32 v31, v40
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
@@ -36228,7 +36223,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v44
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -36237,7 +36232,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v46
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -36246,7 +36241,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v57
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v58
; SI-NEXT: v_cvt_f16_f32_e32 v58, s16
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
@@ -36257,38 +36252,12 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v34
; SI-NEXT: v_mov_b32_e32 v33, v32
; SI-NEXT: v_or_b32_e32 v10, v32, v10
@@ -36313,12 +36282,12 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
; SI-NEXT: v_or_b32_e32 v1, v12, v1
; SI-NEXT: v_or_b32_e32 v2, v11, v2
@@ -36427,12 +36396,10 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v40, v44
; SI-NEXT: s_cbranch_vccnz .LBB47_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: v_cvt_f32_f16_e32 v1, v58
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
@@ -37218,7 +37185,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -37251,7 +37218,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -37284,7 +37251,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -37401,7 +37368,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -37434,7 +37401,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -37467,7 +37434,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -39888,40 +39855,53 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:4
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v2
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v4
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:52
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:108
+; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v4
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:104
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:100
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v10
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v6
+; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:96
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:92
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v10
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:84
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:48
@@ -39952,27 +39932,10 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56
; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:52
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v28
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v30
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v30
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
@@ -40057,7 +40020,6 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; kill: killed $vgpr30
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55
; SI-NEXT: v_and_b32_e32 v18, 0xffff, v61
; SI-NEXT: ; kill: killed $vgpr30
@@ -40202,7 +40164,6 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v61
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -40350,7 +40311,7 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: bitcast_v60i16_to_v15f64:
@@ -40632,7 +40593,6 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v61, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v28
@@ -40711,6 +40671,7 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v60
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v61
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -40875,6 +40836,9 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB50_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
@@ -40889,9 +40853,6 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v38, v61, s6
; GFX9-NEXT: v_perm_b32 v1, v37, v60, s6
; GFX9-NEXT: v_perm_b32 v2, v62, v59, s6
@@ -40910,6 +40871,10 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_u16 v6, v6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v44, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -40938,10 +40903,6 @@ define <15 x double> @bitcast_v60i16_to_v15f64(<60 x i16> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v48, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v39, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, v10, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -42097,7 +42058,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -42130,7 +42091,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -42163,7 +42124,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -42280,7 +42241,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -42313,7 +42274,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -42346,7 +42307,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -45262,6 +45223,9 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:116
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32
@@ -45290,23 +45254,12 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:88
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:84
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
; SI-NEXT: v_cvt_f16_f32_e32 v59, v1
; SI-NEXT: v_cvt_f16_f32_e32 v57, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v2
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v60
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v7
@@ -45316,8 +45269,6 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v9
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v8
@@ -45339,9 +45290,18 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v14
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v17
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v16
@@ -45351,6 +45311,7 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v21
@@ -45388,7 +45349,6 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v61
; SI-NEXT: v_cvt_f16_f32_e32 v61, v49
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v49, v55
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -45403,6 +45363,7 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v32
+; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f16_f32_e32 v32, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -46274,7 +46235,6 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v61, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v28
@@ -46353,6 +46313,7 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v60
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v61
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
@@ -46517,6 +46478,9 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB54_4
; GFX9-NEXT: ; %bb.3: ; %cmp.true
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
@@ -46531,9 +46495,6 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b32 s6, 0x5040100
; GFX9-NEXT: v_perm_b32 v0, v38, v61, s6
; GFX9-NEXT: s_movk_i32 s7, 0x200
; GFX9-NEXT: v_perm_b32 v1, v37, v60, s6
@@ -46553,6 +46514,10 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_pk_add_f16 v6, v6, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, v7, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, v8, s7 op_sel_hi:[1,0]
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_perm_b32 v9, v9, v44, s6
; GFX9-NEXT: s_waitcnt vmcnt(14)
@@ -46581,10 +46546,6 @@ define <15 x double> @bitcast_v60f16_to_v15f64(<60 x half> %a, i32 %b) {
; GFX9-NEXT: v_perm_b32 v21, v21, v48, s6
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_perm_b32 v22, v22, v39, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v23, v24, v23, s6
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; GFX9-NEXT: v_pk_add_f16 v9, v9, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, v10, s7 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v11, v11, s7 op_sel_hi:[1,0]
@@ -46905,12 +46866,35 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: v_cvt_f16_f32_e32 v50, v54
; SI-NEXT: v_cvt_f16_f32_e32 v48, v48
; SI-NEXT: v_cvt_f16_f32_e32 v31, v40
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
@@ -46920,7 +46904,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v44
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -46929,7 +46913,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v46
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -46938,7 +46922,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v57
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v58
; SI-NEXT: v_cvt_f16_f32_e32 v58, s16
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
@@ -46949,38 +46933,12 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v34
; SI-NEXT: v_mov_b32_e32 v33, v32
; SI-NEXT: v_or_b32_e32 v10, v32, v10
@@ -47005,12 +46963,12 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
; SI-NEXT: v_or_b32_e32 v1, v12, v1
; SI-NEXT: v_or_b32_e32 v2, v11, v2
@@ -47119,12 +47077,10 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v40, v44
; SI-NEXT: s_cbranch_vccnz .LBB55_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: v_cvt_f32_f16_e32 v1, v58
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
@@ -47910,7 +47866,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v12
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:316
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:312
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:308
@@ -47943,7 +47899,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:200
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:196
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:192
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:188
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:184
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:180
@@ -47976,7 +47932,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v157, s32 offset:72
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v158, s32 offset:68
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v159, s32 offset:64
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Spill
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v168, s32 offset:60
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v169, s32 offset:56
; GFX11-TRUE16-NEXT: scratch_store_b32 off, v170, s32 offset:52
@@ -48093,7 +48049,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v19, v188 :: v_dual_mov_b32 v20, v187
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v21, v186 :: v_dual_mov_b32 v22, v185
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v23, v191 :: v_dual_mov_b32 v24, v190
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v191, off, s32
; GFX11-TRUE16-NEXT: scratch_load_b32 v190, off, s32 offset:4
; GFX11-TRUE16-NEXT: scratch_load_b32 v189, off, s32 offset:8
@@ -48126,7 +48082,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:116
; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:120
; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:124
-; GFX11-TRUE16-NEXT: s_clause 0x1f
+; GFX11-TRUE16-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:128
; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:132
; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:136
@@ -48159,7 +48115,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:244
; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:248
; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:252
-; GFX11-TRUE16-NEXT: s_clause 0xf
+; GFX11-TRUE16-NEXT: s_clause 0xf ; 64-byte Folded Reload
; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:256
; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:260
; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:264
@@ -51893,27 +51849,27 @@ define <60 x i16> @bitcast_v60f16_to_v60i16(<60 x half> %a, i32 %b) {
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v55, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v40, v4
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v18
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v40, v4
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v6
; SI-NEXT: v_cvt_f16_f32_e32 v53, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v10
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v6
; SI-NEXT: v_cvt_f16_f32_e32 v49, v12
; SI-NEXT: v_cvt_f16_f32_e32 v6, v13
; SI-NEXT: v_cvt_f16_f32_e32 v37, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v18
; SI-NEXT: v_cvt_f16_f32_e32 v52, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v9
; SI-NEXT: v_cvt_f16_f32_e32 v48, v11
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_cvt_f16_f32_e32 v38, v16
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v2, v19
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
@@ -53259,6 +53215,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v44
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -53285,10 +53243,13 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v50
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v51, v11
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
@@ -53300,8 +53261,26 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v3, v26
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v50
+; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v8
+; SI-NEXT: v_mov_b32_e32 v8, v48
+; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v44
+; SI-NEXT: v_lshr_b64 v[44:45], v[29:30], 16
+; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v38
+; SI-NEXT: v_cvt_f32_f16_e32 v38, v43
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
@@ -53329,17 +53308,11 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v18, v3, v5
; SI-NEXT: v_cvt_f32_f16_e32 v5, v37
; SI-NEXT: v_cvt_f32_f16_e32 v3, v16
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_mov_b32_e32 v51, v11
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v55, v5
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
@@ -53382,52 +53355,32 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v3, v6
; SI-NEXT: v_lshr_b64 v[58:59], v[34:35], 16
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v50
-; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v8
-; SI-NEXT: v_mov_b32_e32 v8, v48
; SI-NEXT: v_cvt_f16_f32_e32 v48, v5
-; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v48
-; SI-NEXT: v_or_b32_e32 v6, v3, v5
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v4
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v44
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v31
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v4
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v48
; SI-NEXT: v_mov_b32_e32 v59, v48
-; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v1
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v38
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v60
-; SI-NEXT: v_or_b32_e32 v4, v3, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v38, v43
-; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_lshr_b64 v[47:48], v[17:18], 16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshr_b64 v[44:45], v[29:30], 16
+; SI-NEXT: v_or_b32_e32 v6, v3, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v24
; SI-NEXT: v_cvt_f32_f16_e32 v24, v8
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v31
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v60
+; SI-NEXT: v_or_b32_e32 v4, v3, v4
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v20
; SI-NEXT: v_cvt_f32_f16_e32 v20, v39
+; SI-NEXT: v_lshr_b64 v[47:48], v[17:18], 16
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v31, v20
@@ -53524,14 +53477,15 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v32, v41
; SI-NEXT: v_lshr_b64 v[40:41], v[21:22], 16
; SI-NEXT: v_lshr_b64 v[20:21], v[11:12], 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshr_b64 v[20:21], v[56:57], 16
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v11, v24
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshr_b64 v[8:9], v[9:10], 16
; SI-NEXT: v_mov_b32_e32 v39, v31
; SI-NEXT: v_mov_b32_e32 v31, v60
@@ -53541,7 +53495,6 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v37, v55
; SI-NEXT: v_lshr_b64 v[55:56], v[5:6], 16
; SI-NEXT: v_lshr_b64 v[24:25], v[3:4], 16
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshr_b64 v[20:21], v[1:2], 16
; SI-NEXT: .LBB59_3: ; %end
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v58
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 30ad46d9..f3885d6 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -968,14 +968,14 @@ define <64 x bfloat> @v_load_global_v64bf16(ptr addrspace(1) %ptr) {
; GFX8-NEXT: v_addc_u32_e32 v25, vcc, 0, v29, vcc
; GFX8-NEXT: s_movk_i32 s4, 0x70
; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[28:29]
-; GFX8-NEXT: flat_load_dwordx4 v[12:15], v[12:13]
; GFX8-NEXT: v_add_u32_e32 v28, vcc, s4, v28
; GFX8-NEXT: v_addc_u32_e32 v29, vcc, 0, v29, vcc
; GFX8-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; GFX8-NEXT: flat_load_dwordx4 v[8:11], v[8:9]
+; GFX8-NEXT: flat_load_dwordx4 v[12:15], v[12:13]
+; GFX8-NEXT: flat_load_dwordx4 v[24:27], v[24:25]
; GFX8-NEXT: flat_load_dwordx4 v[16:19], v[16:17]
; GFX8-NEXT: flat_load_dwordx4 v[20:23], v[20:21]
-; GFX8-NEXT: flat_load_dwordx4 v[24:27], v[24:25]
; GFX8-NEXT: flat_load_dwordx4 v[28:31], v[28:29]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
@@ -9552,6 +9552,7 @@ define <32 x double> @global_extload_v32bf16_to_v32f64(ptr addrspace(1) %ptr) {
; GFX8-NEXT: v_addc_u32_e32 v34, vcc, 0, v2, vcc
; GFX8-NEXT: v_add_u32_e32 v35, vcc, 36, v1
; GFX8-NEXT: v_addc_u32_e32 v36, vcc, 0, v2, vcc
+; GFX8-NEXT: v_add_u32_e32 v37, vcc, 38, v1
; GFX8-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX8-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
; GFX8-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
@@ -9563,7 +9564,6 @@ define <32 x double> @global_extload_v32bf16_to_v32f64(ptr addrspace(1) %ptr) {
; GFX8-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX8-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX8-NEXT: buffer_store_dword v58, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX8-NEXT: v_add_u32_e32 v37, vcc, 38, v1
; GFX8-NEXT: flat_load_ushort v44, v[1:2]
; GFX8-NEXT: v_addc_u32_e32 v38, vcc, 0, v2, vcc
; GFX8-NEXT: v_add_u32_e32 v48, vcc, 40, v1
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
index f8655a7..f465e3c 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
@@ -280,7 +280,7 @@ bb0:
br i1 %tmp, label %bb2, label %bb3
bb2:
- store volatile i32 17, ptr addrspace(1) undef
+ store volatile i32 17, ptr addrspace(1) poison
br label %bb4
bb3:
@@ -375,7 +375,7 @@ bb0:
br i1 %cmp0, label %bb2, label %bb1
bb1:
- %val = load volatile i32, ptr addrspace(4) undef
+ %val = load volatile i32, ptr addrspace(4) poison
%cmp1 = icmp eq i32 %val, 3
br i1 %cmp1, label %bb3, label %bb2
@@ -512,7 +512,7 @@ loop_body:
br label %loop
ret:
- store volatile i32 7, ptr addrspace(1) undef
+ store volatile i32 7, ptr addrspace(1) poison
ret void
}
@@ -622,7 +622,7 @@ bb14: ; preds = %bb13, %bb9
br label %bb19
bb19: ; preds = %bb14, %bb13, %bb9
- %tmp20 = phi i32 [ undef, %bb9 ], [ undef, %bb13 ], [ %tmp18, %bb14 ]
+ %tmp20 = phi i32 [ poison, %bb9 ], [ poison, %bb13 ], [ %tmp18, %bb14 ]
%tmp21 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %arg5
store i32 %tmp20, ptr addrspace(1) %tmp21, align 4
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
index 6831380..04f8ad8 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
@@ -450,23 +450,38 @@ define amdgpu_kernel void @memcpy_known(ptr addrspace(7) %src, ptr addrspace(7)
; GISEL-GFX942-NEXT: v_add_u32_e32 v63, s12, v1
; GISEL-GFX942-NEXT: v_add_u32_e32 v1, 0x100, v1
; GISEL-GFX942-NEXT: v_cmp_lt_u32_e32 vcc, v1, v0
-; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0)
-; GISEL-GFX942-NEXT: scratch_store_dwordx4 off, a[0:3], off ; 16-byte Folded Spill
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v63, s[4:7], 0 offen offset:16
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v63, s[4:7], 0 offen offset:32
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v63, s[4:7], 0 offen offset:48
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v63, s[4:7], 0 offen offset:64
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v63, s[4:7], 0 offen offset:80
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v63, s[4:7], 0 offen offset:96
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v63, s[4:7], 0 offen offset:112
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v63, s[4:7], 0 offen offset:128
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v63, s[4:7], 0 offen offset:144
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v63, s[4:7], 0 offen offset:160
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v63, s[4:7], 0 offen offset:176
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v63, s[4:7], 0 offen offset:192
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v63, s[4:7], 0 offen offset:208
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v63, s[4:7], 0 offen offset:224
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
+; GISEL-GFX942-NEXT: scratch_store_dwordx4 off, a[0:3], off ; 16-byte Folded Spill
; GISEL-GFX942-NEXT: scratch_load_dwordx4 v[2:5], off, off ; 16-byte Folded Reload
; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen offset:240
@@ -976,23 +991,38 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX942-NEXT: v_add_u32_e32 v63, s12, v1
; GISEL-GFX942-NEXT: v_add_u32_e32 v1, 0x100, v1
; GISEL-GFX942-NEXT: v_cmp_lt_u32_e32 vcc, v1, v0
-; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0)
-; GISEL-GFX942-NEXT: scratch_store_dwordx4 off, a[0:3], off ; 16-byte Folded Spill
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v63, s[4:7], 0 offen offset:16
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v63, s[4:7], 0 offen offset:32
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v63, s[4:7], 0 offen offset:48
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v63, s[4:7], 0 offen offset:64
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v63, s[4:7], 0 offen offset:80
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v63, s[4:7], 0 offen offset:96
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v63, s[4:7], 0 offen offset:112
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v63, s[4:7], 0 offen offset:128
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v63, s[4:7], 0 offen offset:144
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v63, s[4:7], 0 offen offset:160
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v63, s[4:7], 0 offen offset:176
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v63, s[4:7], 0 offen offset:192
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v63, s[4:7], 0 offen offset:208
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v63, s[4:7], 0 offen offset:224
+; GISEL-GFX942-NEXT: s_waitcnt vmcnt(15)
+; GISEL-GFX942-NEXT: scratch_store_dwordx4 off, a[0:3], off ; 16-byte Folded Spill
; GISEL-GFX942-NEXT: scratch_load_dwordx4 v[2:5], off, off ; 16-byte Folded Reload
; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen offset:240
@@ -1159,24 +1189,23 @@ define amdgpu_kernel void @memcpy_known_small(ptr addrspace(7) %src, ptr addrspa
; SDAG-GFX1100-NEXT: s_mov_b32 s9, s12
; SDAG-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-GFX1100-NEXT: s_mov_b32 s6, s3
-; SDAG-GFX1100-NEXT: v_mov_b32_e32 v4, s0
; SDAG-GFX1100-NEXT: s_mov_b32 s8, s1
; SDAG-GFX1100-NEXT: s_or_b64 s[10:11], s[6:7], s[12:13]
; SDAG-GFX1100-NEXT: s_mov_b32 s13, s2
-; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; SDAG-GFX1100-NEXT: v_mov_b32_e32 v4, s0
; SDAG-GFX1100-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[0:3], v4, s[8:11], 0 offen
; SDAG-GFX1100-NEXT: s_clause 0x1
; SDAG-GFX1100-NEXT: s_load_b32 s13, s[4:5], 0x54
; SDAG-GFX1100-NEXT: s_load_b128 s[0:3], s[4:5], 0x44
; SDAG-GFX1100-NEXT: s_mov_b32 s5, s12
; SDAG-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX1100-NEXT: s_mov_b32 s4, s3
; SDAG-GFX1100-NEXT: v_mov_b32_e32 v5, s0
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[0:3], v4, s[8:11], 0 offen
+; SDAG-GFX1100-NEXT: s_mov_b32 s4, s3
+; SDAG-GFX1100-NEXT: s_mov_b32 s3, s12
; SDAG-GFX1100-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
; SDAG-GFX1100-NEXT: s_mov_b32 s13, s2
; SDAG-GFX1100-NEXT: s_mov_b32 s2, s1
-; SDAG-GFX1100-NEXT: s_mov_b32 s3, s12
; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; SDAG-GFX1100-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(0)
@@ -1220,12 +1249,12 @@ define amdgpu_kernel void @memcpy_known_small(ptr addrspace(7) %src, ptr addrspa
; GISEL-GFX1100-NEXT: s_mov_b32 s8, s1
; GISEL-GFX1100-NEXT: s_mov_b32 s9, s2
; GISEL-GFX1100-NEXT: s_mov_b32 s10, s3
-; GISEL-GFX1100-NEXT: buffer_load_b128 v[0:3], v4, s[8:11], 0 offen
; GISEL-GFX1100-NEXT: s_clause 0x1
; GISEL-GFX1100-NEXT: s_load_b128 s[0:3], s[4:5], 0x44
; GISEL-GFX1100-NEXT: s_load_b32 s7, s[4:5], 0x54
; GISEL-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-GFX1100-NEXT: v_mov_b32_e32 v5, s0
+; GISEL-GFX1100-NEXT: buffer_load_b128 v[0:3], v4, s[8:11], 0 offen
; GISEL-GFX1100-NEXT: s_mov_b32 s4, s1
; GISEL-GFX1100-NEXT: s_mov_b32 s5, s2
; GISEL-GFX1100-NEXT: s_mov_b32 s6, s3
diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
index 8e12e7e..832e43f 100644
--- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
@@ -4253,6 +4253,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; VI-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; VI-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: buffer_load_dwordx4 v[24:27], off, s[4:7], 0 offset:96
; VI-NEXT: buffer_load_dwordx4 v[28:31], off, s[4:7], 0 offset:112
; VI-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; VI-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0 offset:16
@@ -4260,7 +4261,6 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; VI-NEXT: buffer_load_dwordx4 v[12:15], off, s[4:7], 0 offset:48
; VI-NEXT: buffer_load_dwordx4 v[16:19], off, s[4:7], 0 offset:64
; VI-NEXT: buffer_load_dwordx4 v[20:23], off, s[4:7], 0 offset:80
-; VI-NEXT: buffer_load_dwordx4 v[24:27], off, s[4:7], 0 offset:96
; VI-NEXT: s_mov_b32 s38, -1
; VI-NEXT: s_mov_b32 s39, 0xe80000
; VI-NEXT: s_add_u32 s36, s36, s3
@@ -4272,7 +4272,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; VI-NEXT: s_add_u32 s8, s8, external_void_func_v32i32@rel32@lo+4
; VI-NEXT: s_addc_u32 s9, s9, external_void_func_v32i32@rel32@hi+12
; VI-NEXT: s_mov_b64 s[2:3], s[38:39]
-; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: buffer_store_dword v31, off, s[36:39], s32
; VI-NEXT: s_swappc_b64 s[30:31], s[8:9]
; VI-NEXT: s_endpgm
@@ -4285,6 +4285,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; CI-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; CI-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: buffer_load_dwordx4 v[24:27], off, s[4:7], 0 offset:96
; CI-NEXT: buffer_load_dwordx4 v[28:31], off, s[4:7], 0 offset:112
; CI-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0 offset:16
@@ -4292,7 +4293,6 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; CI-NEXT: buffer_load_dwordx4 v[12:15], off, s[4:7], 0 offset:48
; CI-NEXT: buffer_load_dwordx4 v[16:19], off, s[4:7], 0 offset:64
; CI-NEXT: buffer_load_dwordx4 v[20:23], off, s[4:7], 0 offset:80
-; CI-NEXT: buffer_load_dwordx4 v[24:27], off, s[4:7], 0 offset:96
; CI-NEXT: s_mov_b32 s38, -1
; CI-NEXT: s_mov_b32 s39, 0xe8f000
; CI-NEXT: s_add_u32 s36, s36, s3
@@ -4304,7 +4304,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; CI-NEXT: s_add_u32 s8, s8, external_void_func_v32i32@rel32@lo+4
; CI-NEXT: s_addc_u32 s9, s9, external_void_func_v32i32@rel32@hi+12
; CI-NEXT: s_mov_b64 s[2:3], s[38:39]
-; CI-NEXT: s_waitcnt vmcnt(7)
+; CI-NEXT: s_waitcnt vmcnt(6)
; CI-NEXT: buffer_store_dword v31, off, s[36:39], s32
; CI-NEXT: s_swappc_b64 s[30:31], s[8:9]
; CI-NEXT: s_endpgm
@@ -4317,6 +4317,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: buffer_load_dwordx4 v[24:27], off, s[4:7], 0 offset:96
; GFX9-NEXT: buffer_load_dwordx4 v[28:31], off, s[4:7], 0 offset:112
; GFX9-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; GFX9-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0 offset:16
@@ -4324,7 +4325,6 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; GFX9-NEXT: buffer_load_dwordx4 v[12:15], off, s[4:7], 0 offset:48
; GFX9-NEXT: buffer_load_dwordx4 v[16:19], off, s[4:7], 0 offset:64
; GFX9-NEXT: buffer_load_dwordx4 v[20:23], off, s[4:7], 0 offset:80
-; GFX9-NEXT: buffer_load_dwordx4 v[24:27], off, s[4:7], 0 offset:96
; GFX9-NEXT: s_mov_b32 s38, -1
; GFX9-NEXT: s_mov_b32 s39, 0xe00000
; GFX9-NEXT: s_add_u32 s36, s36, s3
@@ -4336,7 +4336,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32() #0 {
; GFX9-NEXT: s_add_u32 s8, s8, external_void_func_v32i32@rel32@lo+4
; GFX9-NEXT: s_addc_u32 s9, s9, external_void_func_v32i32@rel32@hi+12
; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
-; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: buffer_store_dword v31, off, s[36:39], s32
; GFX9-NEXT: s_swappc_b64 s[30:31], s[8:9]
; GFX9-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/ds_write2.ll b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
index 0cae0e5..5cc6845 100644
--- a/llvm/test/CodeGen/AMDGPU/ds_write2.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
@@ -851,12 +851,12 @@ define amdgpu_kernel void @unaligned_offset_simple_write2_one_val_f64(ptr addrsp
; CI-NEXT: v_add_i32_e32 v0, vcc, s4, v0
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: ds_write_b8 v0, v1 offset:9
+; CI-NEXT: ds_write_b8 v0, v2 offset:13
; CI-NEXT: v_lshrrev_b32_e32 v3, 24, v1
; CI-NEXT: ds_write_b8 v0, v1 offset:5
; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v1
; CI-NEXT: v_lshrrev_b32_e32 v5, 8, v1
-; CI-NEXT: ds_write_b8 v0, v1 offset:9
-; CI-NEXT: ds_write_b8 v0, v2 offset:13
; CI-NEXT: v_lshrrev_b32_e32 v1, 24, v2
; CI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; CI-NEXT: v_lshrrev_b32_e32 v2, 8, v2
diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
index 5fb50d0..da08f4f 100644
--- a/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll
@@ -3755,42 +3755,44 @@ define <64 x half> @v_test_canonicalize_var_v64f16(<64 x half> %val) #1 {
; CI-NEXT: v_lshlrev_b32_e32 v10, 16, v13
; CI-NEXT: v_cvt_f16_f32_e32 v13, v22
; CI-NEXT: v_or_b32_e32 v10, v14, v10
+; CI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:4
+; CI-NEXT: buffer_load_dword v15, off, s[0:3], s32
; CI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; CI-NEXT: v_or_b32_e32 v17, v18, v17
; CI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; CI-NEXT: v_or_b32_e32 v17, v18, v17
; CI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:16
; CI-NEXT: v_cvt_f16_f32_e32 v22, v27
-; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
; CI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; CI-NEXT: v_cvt_f16_f32_e32 v19, v19
; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; CI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; CI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; CI-NEXT: v_or_b32_e32 v13, v16, v13
; CI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:12
; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; CI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; CI-NEXT: v_or_b32_e32 v19, v20, v19
; CI-NEXT: v_lshlrev_b32_e32 v20, 16, v21
; CI-NEXT: v_cvt_f16_f32_e32 v21, v30
; CI-NEXT: v_or_b32_e32 v20, v22, v20
; CI-NEXT: v_cvt_f16_f32_e32 v22, v29
-; CI-NEXT: s_waitcnt vmcnt(6)
+; CI-NEXT: s_waitcnt vmcnt(8)
; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
; CI-NEXT: v_cvt_f32_f16_e32 v21, v21
; CI-NEXT: v_cvt_f32_f16_e32 v22, v22
; CI-NEXT: v_cvt_f32_f16_e32 v11, v11
-; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; CI-NEXT: s_waitcnt vmcnt(5)
+; CI-NEXT: s_waitcnt vmcnt(7)
; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; CI-NEXT: v_cvt_f16_f32_e32 v21, v21
; CI-NEXT: v_cvt_f16_f32_e32 v22, v22
; CI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; CI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; CI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; CI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; CI-NEXT: v_or_b32_e32 v21, v22, v21
; CI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; CI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; CI-NEXT: s_waitcnt vmcnt(3)
+; CI-NEXT: s_waitcnt vmcnt(5)
; CI-NEXT: v_cvt_f16_f32_e32 v31, v31
-; CI-NEXT: s_waitcnt vmcnt(2)
+; CI-NEXT: s_waitcnt vmcnt(4)
; CI-NEXT: v_cvt_f16_f32_e32 v32, v32
; CI-NEXT: v_cvt_f32_f16_e32 v31, v31
; CI-NEXT: v_cvt_f32_f16_e32 v32, v32
@@ -3802,6 +3804,27 @@ define <64 x half> @v_test_canonicalize_var_v64f16(<64 x half> %val) #1 {
; CI-NEXT: buffer_store_dword v31, v32, s[0:3], 0 offen
; CI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:116
; CI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
+; CI-NEXT: s_waitcnt vmcnt(6)
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: s_waitcnt vmcnt(5)
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; CI-NEXT: v_or_b32_e32 v14, v15, v14
+; CI-NEXT: s_waitcnt vmcnt(3)
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v16
+; CI-NEXT: v_cvt_f16_f32_e32 v16, v18
+; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; CI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
+; CI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
+; CI-NEXT: v_or_b32_e32 v12, v12, v15
+; CI-NEXT: v_add_i32_e32 v15, vcc, 0x44, v0
+; CI-NEXT: v_or_b32_e32 v11, v16, v11
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_cvt_f16_f32_e32 v31, v31
; CI-NEXT: s_waitcnt vmcnt(0)
@@ -3968,28 +3991,6 @@ define <64 x half> @v_test_canonicalize_var_v64f16(<64 x half> %val) #1 {
; CI-NEXT: v_or_b32_e32 v31, v32, v31
; CI-NEXT: v_add_i32_e32 v32, vcc, 0x48, v0
; CI-NEXT: buffer_store_dword v31, v32, s[0:3], 0 offen
-; CI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:4
-; CI-NEXT: buffer_load_dword v15, off, s[0:3], s32
-; CI-NEXT: s_waitcnt vmcnt(1)
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; CI-NEXT: v_cvt_f32_f16_e32 v14, v14
-; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
-; CI-NEXT: v_cvt_f16_f32_e32 v14, v14
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; CI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; CI-NEXT: v_or_b32_e32 v14, v15, v14
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v16
-; CI-NEXT: v_cvt_f16_f32_e32 v16, v18
-; CI-NEXT: v_cvt_f32_f16_e32 v15, v15
-; CI-NEXT: v_cvt_f32_f16_e32 v16, v16
-; CI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; CI-NEXT: v_cvt_f16_f32_e32 v16, v16
-; CI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; CI-NEXT: v_or_b32_e32 v12, v12, v15
-; CI-NEXT: v_or_b32_e32 v11, v16, v11
-; CI-NEXT: v_add_i32_e32 v15, vcc, 0x44, v0
; CI-NEXT: buffer_store_dword v11, v15, s[0:3], 0 offen
; CI-NEXT: v_add_i32_e32 v11, vcc, 64, v0
; CI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen
diff --git a/llvm/test/CodeGen/AMDGPU/finalizebundle.mir b/llvm/test/CodeGen/AMDGPU/finalizebundle.mir
index 279f429..590d69b 100644
--- a/llvm/test/CodeGen/AMDGPU/finalizebundle.mir
+++ b/llvm/test/CodeGen/AMDGPU/finalizebundle.mir
@@ -1,6 +1,19 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -passes=finalizebundle-test %s -o - | FileCheck %s
+--- |
+
+ @foo = addrspace(3) global i32 poison
+
+ define void @test_overlap() { unreachable }
+ define void @test_dead_redef() { unreachable }
+ define void @test_tied() { unreachable }
+ define void @test_mmo_merge1() { unreachable }
+ define void @test_mmo_merge2() { unreachable }
+ define void @test_mmo_drop() { unreachable }
+
+...
+
---
name: test_overlap
body: |
@@ -47,3 +60,42 @@ body: |
%1:vgpr_32 = COPY %0:vgpr_32
%2:vgpr_32 = V_FMAC_F16_e32 %1, %1, %0, implicit $mode, implicit $exec
...
+
+---
+name: test_mmo_merge1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_mmo_merge1
+ ; CHECK: BUNDLE implicit-def %0, implicit %1:vgpr_32, implicit $exec :: (store (s32) into @foo, addrspace 3) {
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY %1:vgpr_32
+ ; CHECK-NEXT: DS_WRITE_B32_gfx9 %1:vgpr_32, internal [[COPY]], 0, 0, implicit $exec :: (store (s32) into @foo, addrspace 3)
+ ; CHECK-NEXT: }
+ %1:vgpr_32 = COPY %0:vgpr_32
+ DS_WRITE_B32_gfx9 %0, %1, 0, 0, implicit $exec :: (store (s32) into @foo, addrspace 3)
+...
+
+---
+name: test_mmo_merge2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_mmo_merge2
+ ; CHECK: BUNDLE implicit %0:vgpr_32, implicit %1:vgpr_32, implicit $exec :: (store (s32) into @foo, addrspace 3), (store (s32) into @foo + 4, addrspace 3) {
+ ; CHECK-NEXT: DS_WRITE_B32_gfx9 %0:vgpr_32, %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into @foo, addrspace 3)
+ ; CHECK-NEXT: DS_WRITE_B32_gfx9 %0:vgpr_32, %1:vgpr_32, 4, 0, implicit $exec :: (store (s32) into @foo + 4, addrspace 3)
+ ; CHECK-NEXT: }
+ DS_WRITE_B32_gfx9 %0:vgpr_32, %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into @foo, addrspace 3)
+ DS_WRITE_B32_gfx9 %0:vgpr_32, %1:vgpr_32, 4, 0, implicit $exec :: (store (s32) into @foo + 4, addrspace 3)
+...
+
+---
+name: test_mmo_drop
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_mmo_drop
+ ; CHECK: BUNDLE implicit %0:vgpr_32, implicit %1:vgpr_32, implicit $exec {
+ ; CHECK-NEXT: DS_WRITE_B32_gfx9 %0:vgpr_32, %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into @foo, addrspace 3)
+ ; CHECK-NEXT: DS_WRITE_B32_gfx9 %0:vgpr_32, %1:vgpr_32, 4, 0, implicit $exec
+ ; CHECK-NEXT: }
+ DS_WRITE_B32_gfx9 %0:vgpr_32, %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into @foo, addrspace 3)
+ DS_WRITE_B32_gfx9 %0:vgpr_32, %1:vgpr_32, 4, 0, implicit $exec
+...
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
index eefc781..3572340 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -263,7 +263,7 @@ define amdgpu_ps float @flat_xchg_saddr_i32_rtn_neg2048(ptr inreg %sbase, i32 %v
; Uniformity edge cases
; --------------------------------------------------------------------------------
-@ptr.in.lds = internal addrspace(3) global ptr undef
+@ptr.in.lds = internal addrspace(3) global ptr poison
; Base pointer is uniform, but also in VGPRs
define amdgpu_ps float @flat_xchg_saddr_uniform_ptr_in_vgprs_rtn(i32 %voffset, i32 %data) {
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-store.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-store.ll
index 32888d2..3d0e287 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-store.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-store.ll
@@ -54,7 +54,7 @@ define amdgpu_ps void @flat_store_saddr_i8_zext_vgpr_offset_neg2048(ptr inreg %s
; Uniformity edge cases
; --------------------------------------------------------------------------------
-@ptr.in.lds = internal addrspace(3) global ptr undef
+@ptr.in.lds = internal addrspace(3) global ptr poison
; Base pointer is uniform, but also in VGPRs
define amdgpu_ps void @flat_store_saddr_uniform_ptr_in_vgprs(i32 %voffset, i8 %data) {
diff --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
index b750d28..ba81446 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
@@ -807,7 +807,7 @@ define amdgpu_gfx void @call_100xi32() #0 {
; GFX10-NEXT: buffer_store_dword v95, off, s[0:3], s33 ; 4-byte Folded Spill
; GFX10-NEXT: v_writelane_b32 v100, s31, 1
; GFX10-NEXT: s_swappc_b64 s[30:31], s[34:35]
-; GFX10-NEXT: s_clause 0x1f
+; GFX10-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX10-NEXT: buffer_load_dword v95, off, s[0:3], s33
; GFX10-NEXT: buffer_load_dword v94, off, s[0:3], s33 offset:4
; GFX10-NEXT: buffer_load_dword v93, off, s[0:3], s33 offset:8
@@ -863,7 +863,7 @@ define amdgpu_gfx void @call_100xi32() #0 {
; GFX11-NEXT: s_mov_b32 s1, return_100xi32@abs32@hi
; GFX11-NEXT: s_mov_b32 s0, return_100xi32@abs32@lo
; GFX11-NEXT: s_addk_i32 s32, 0x90
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s33 offset:124
; GFX11-NEXT: scratch_store_b32 off, v41, s33 offset:120
; GFX11-NEXT: scratch_store_b32 off, v42, s33 offset:116
@@ -898,7 +898,7 @@ define amdgpu_gfx void @call_100xi32() #0 {
; GFX11-NEXT: scratch_store_b32 off, v95, s33
; GFX11-NEXT: v_writelane_b32 v100, s31, 1
; GFX11-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX11-NEXT: s_clause 0x1f
+; GFX11-NEXT: s_clause 0x1f ; 128-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v95, off, s33
; GFX11-NEXT: scratch_load_b32 v94, off, s33 offset:4
; GFX11-NEXT: scratch_load_b32 v93, off, s33 offset:8
@@ -2518,7 +2518,7 @@ define amdgpu_gfx <72 x i32> @return_72xi32(<72 x i32> %val) #1 {
; GFX11-LABEL: return_72xi32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_clause 0xc
+; GFX11-NEXT: s_clause 0xc ; 52-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:212
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:208
; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:204
@@ -2551,23 +2551,23 @@ define amdgpu_gfx <72 x i32> @return_72xi32(<72 x i32> %val) #1 {
; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:96
; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:92
; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:88
+; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: scratch_store_b128 v0, v[21:24], off offset:80
-; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: scratch_store_b128 v0, v[17:20], off offset:64
+; GFX11-NEXT: s_clause 0x5
; GFX11-NEXT: scratch_load_b32 v23, off, s32 offset:112
; GFX11-NEXT: scratch_load_b32 v22, off, s32 offset:108
; GFX11-NEXT: scratch_load_b32 v21, off, s32 offset:104
-; GFX11-NEXT: scratch_store_b128 v0, v[17:20], off offset:64
-; GFX11-NEXT: s_clause 0x2
; GFX11-NEXT: scratch_load_b32 v19, off, s32 offset:128
; GFX11-NEXT: scratch_load_b32 v18, off, s32 offset:124
; GFX11-NEXT: scratch_load_b32 v17, off, s32 offset:120
+; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: scratch_store_b128 v0, v[13:16], off offset:48
-; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: scratch_store_b128 v0, v[9:12], off offset:32
+; GFX11-NEXT: s_clause 0x10
; GFX11-NEXT: scratch_load_b32 v15, off, s32 offset:144
; GFX11-NEXT: scratch_load_b32 v14, off, s32 offset:140
; GFX11-NEXT: scratch_load_b32 v13, off, s32 offset:136
-; GFX11-NEXT: scratch_store_b128 v0, v[9:12], off offset:32
-; GFX11-NEXT: s_clause 0xd
; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:160
; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:156
; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:152
@@ -2608,7 +2608,7 @@ define amdgpu_gfx <72 x i32> @return_72xi32(<72 x i32> %val) #1 {
; GFX11-NEXT: scratch_store_b128 v0, v[25:28], off offset:96
; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off
-; GFX11-NEXT: s_clause 0xc
+; GFX11-NEXT: s_clause 0xc ; 52-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:164
; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:168
; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:172
@@ -2641,21 +2641,6 @@ define amdgpu_gfx void @call_72xi32() #1 {
; GFX9-NEXT: s_mov_b32 s34, s32
; GFX9-NEXT: s_add_i32 s32, s32, 0x28000
; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s33 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:8
@@ -2733,6 +2718,21 @@ define amdgpu_gfx void @call_72xi32() #1 {
; GFX9-NEXT: v_mov_b32_e32 v29, 0
; GFX9-NEXT: v_mov_b32_e32 v30, 0
; GFX9-NEXT: v_mov_b32_e32 v31, 0
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s33 ; 4-byte Folded Spill
; GFX9-NEXT: v_writelane_b32 v63, s31, 1
; GFX9-NEXT: s_swappc_b64 s[30:31], s[36:37]
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:636
@@ -2914,21 +2914,7 @@ define amdgpu_gfx void @call_72xi32() #1 {
; GFX10-NEXT: s_mov_b32 s38, s34
; GFX10-NEXT: s_mov_b32 s34, s32
; GFX10-NEXT: s_add_i32 s32, s32, 0x14000
-; GFX10-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
-; GFX10-NEXT: buffer_store_dword v62, off, s[0:3], s33 ; 4-byte Folded Spill
+; GFX10-NEXT: v_writelane_b32 v63, s30, 0
; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], s32
; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4
; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:8
@@ -2971,12 +2957,11 @@ define amdgpu_gfx void @call_72xi32() #1 {
; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156
; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160
; GFX10-NEXT: v_lshrrev_b32_e64 v0, 5, s33
-; GFX10-NEXT: v_writelane_b32 v63, s30, 0
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: v_mov_b32_e32 v3, 0
-; GFX10-NEXT: v_add_nc_u32_e32 v0, 0x200, v0
; GFX10-NEXT: v_mov_b32_e32 v4, 0
+; GFX10-NEXT: v_add_nc_u32_e32 v0, 0x200, v0
; GFX10-NEXT: v_mov_b32_e32 v5, 0
; GFX10-NEXT: v_mov_b32_e32 v6, 0
; GFX10-NEXT: v_mov_b32_e32 v7, 0
@@ -3006,6 +2991,21 @@ define amdgpu_gfx void @call_72xi32() #1 {
; GFX10-NEXT: v_mov_b32_e32 v31, 0
; GFX10-NEXT: s_mov_b32 s37, return_72xi32@abs32@hi
; GFX10-NEXT: s_mov_b32 s36, return_72xi32@abs32@lo
+; GFX10-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:56 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:52 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v42, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v44, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v45, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v46, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v47, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v56, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v57, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v58, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v59, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v60, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v61, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
+; GFX10-NEXT: buffer_store_dword v62, off, s[0:3], s33 ; 4-byte Folded Spill
; GFX10-NEXT: v_writelane_b32 v63, s31, 1
; GFX10-NEXT: s_swappc_b64 s[30:31], s[36:37]
; GFX10-NEXT: s_clause 0x28
@@ -3138,7 +3138,7 @@ define amdgpu_gfx void @call_72xi32() #1 {
; GFX10-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:152
; GFX10-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:156
; GFX10-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:160
-; GFX10-NEXT: s_clause 0x7
+; GFX10-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; GFX10-NEXT: buffer_load_dword v2, off, s[0:3], s33 offset:1536
; GFX10-NEXT: buffer_load_dword v3, off, s[0:3], s33 offset:1540
; GFX10-NEXT: buffer_load_dword v4, off, s[0:3], s33 offset:1544
@@ -3151,7 +3151,7 @@ define amdgpu_gfx void @call_72xi32() #1 {
; GFX10-NEXT: v_mov_b32_e32 v1, 42
; GFX10-NEXT: v_add_nc_u32_e32 v0, 0x400, v0
; GFX10-NEXT: s_swappc_b64 s[30:31], s[36:37]
-; GFX10-NEXT: s_clause 0xe
+; GFX10-NEXT: s_clause 0xe ; 60-byte Folded Reload
; GFX10-NEXT: buffer_load_dword v62, off, s[0:3], s33
; GFX10-NEXT: buffer_load_dword v61, off, s[0:3], s33 offset:4
; GFX10-NEXT: buffer_load_dword v60, off, s[0:3], s33 offset:8
@@ -3199,7 +3199,7 @@ define amdgpu_gfx void @call_72xi32() #1 {
; GFX11-NEXT: s_mov_b32 s36, s34
; GFX11-NEXT: s_mov_b32 s34, s32
; GFX11-NEXT: s_addk_i32 s32, 0xa00
-; GFX11-NEXT: s_clause 0xb
+; GFX11-NEXT: s_clause 0xb ; 48-byte Folded Spill
; GFX11-NEXT: scratch_store_b32 off, v40, s33 offset:44
; GFX11-NEXT: scratch_store_b32 off, v41, s33 offset:40
; GFX11-NEXT: scratch_store_b32 off, v42, s33 offset:36
@@ -3341,18 +3341,18 @@ define amdgpu_gfx void @call_72xi32() #1 {
; GFX11-NEXT: s_add_i32 s2, s32, 16
; GFX11-NEXT: v_mov_b32_e32 v30, v46
; GFX11-NEXT: scratch_store_b128 off, v[32:35], s2
-; GFX11-NEXT: s_clause 0x3
-; GFX11-NEXT: scratch_load_b128 v[1:4], off, s33 offset:1584
+; GFX11-NEXT: s_clause 0x3 ; 64-byte Folded Reload
; GFX11-NEXT: scratch_load_b128 v[17:20], off, s33 offset:1568
; GFX11-NEXT: scratch_load_b128 v[21:24], off, s33 offset:1552
; GFX11-NEXT: scratch_load_b128 v[25:28], off, s33 offset:1536
+; GFX11-NEXT: scratch_load_b128 v[1:4], off, s33 offset:1584
; GFX11-NEXT: s_add_i32 s2, s33, 0x400
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v31, v47 :: v_dual_mov_b32 v0, s2
-; GFX11-NEXT: s_waitcnt vmcnt(3)
+; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, 42
; GFX11-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX11-NEXT: s_clause 0xb
+; GFX11-NEXT: s_clause 0xb ; 48-byte Folded Reload
; GFX11-NEXT: scratch_load_b32 v59, off, s33
; GFX11-NEXT: scratch_load_b32 v58, off, s33 offset:4
; GFX11-NEXT: scratch_load_b32 v57, off, s33 offset:8
diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
index f807169..93d7eeb 100644
--- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
@@ -255,11 +255,11 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-SDAG-NEXT: global_load_b128 v[26:29], v[0:1], off offset:16
; GCN-SDAG-NEXT: global_load_b128 v[30:33], v[0:1], off
; GCN-SDAG-NEXT: global_load_b128 v[34:37], v[0:1], off offset:64
-; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x70
; GCN-SDAG-NEXT: v_mov_b64_e32 v[48:49], 48
-; GCN-SDAG-NEXT: v_mov_b64_e32 v[38:39], 0x60
; GCN-SDAG-NEXT: v_mov_b64_e32 v[50:51], 32
+; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x70
; GCN-SDAG-NEXT: v_mov_b64_e32 v[64:65], 16
+; GCN-SDAG-NEXT: v_mov_b64_e32 v[38:39], 0x60
; GCN-SDAG-NEXT: v_mov_b64_e32 v[66:67], 0
; GCN-SDAG-NEXT: v_mov_b64_e32 v[52:53], 0x50
; GCN-SDAG-NEXT: v_mov_b64_e32 v[54:55], 64
diff --git a/llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx11.mir b/llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx11.mir
index 7e1055b..03b56ca 100644
--- a/llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx11.mir
+++ b/llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx11.mir
@@ -11,7 +11,7 @@ body: |
; CHECK-LABEL: name: mimg_nsa
; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: BUNDLE implicit-def $vgpr10_vgpr11_vgpr12, implicit-def $vgpr20_vgpr21_vgpr22, implicit $vgpr3, implicit $vgpr8, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec {
+ ; CHECK-NEXT: BUNDLE implicit-def $vgpr10_vgpr11_vgpr12, implicit-def $vgpr20_vgpr21_vgpr22, implicit $vgpr3, implicit $vgpr8, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec :: (load (s128)) {
; CHECK-NEXT: S_CLAUSE 1
; CHECK-NEXT: $vgpr10_vgpr11_vgpr12 = IMAGE_SAMPLE_LZ_V3_V2_nsa_gfx11 $vgpr3, $vgpr8, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 1, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (load (s128))
; CHECK-NEXT: $vgpr20_vgpr21_vgpr22 = IMAGE_SAMPLE_LZ_V3_V2_nsa_gfx11 $vgpr3, $vgpr8, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 14, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (load (s128))
@@ -29,7 +29,7 @@ body: |
; CHECK-LABEL: name: mimg_nsa_mixed
; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: BUNDLE implicit-def $vgpr10, implicit-def $vgpr14, implicit-def $vgpr20_vgpr21_vgpr22, implicit $vgpr3, implicit $vgpr8, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec, implicit $vgpr5_vgpr6 {
+ ; CHECK-NEXT: BUNDLE implicit-def $vgpr10, implicit-def $vgpr14, implicit-def $vgpr20_vgpr21_vgpr22, implicit $vgpr3, implicit $vgpr8, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec, implicit $vgpr5_vgpr6 :: (load (s128)), (dereferenceable load (s128), addrspace 7) {
; CHECK-NEXT: S_CLAUSE 2
; CHECK-NEXT: $vgpr10 = IMAGE_SAMPLE_LZ_V1_V2_nsa_gfx11 $vgpr3, $vgpr8, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 1, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (load (s128))
; CHECK-NEXT: $vgpr14 = IMAGE_SAMPLE_LZ_V1_V2_gfx11 $vgpr5_vgpr6, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 1, 1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), addrspace 7)
diff --git a/llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx12.mir b/llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx12.mir
index 9689dda..68f9e83 100644
--- a/llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx12.mir
+++ b/llvm/test/CodeGen/AMDGPU/hard-clauses-img-gfx12.mir
@@ -10,7 +10,7 @@ body: |
; CHECK-LABEL: name: mimg
; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: BUNDLE implicit-def $vgpr10_vgpr11_vgpr12, implicit-def $vgpr20_vgpr21_vgpr22, implicit $vgpr3, implicit $vgpr4, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec {
+ ; CHECK-NEXT: BUNDLE implicit-def $vgpr10_vgpr11_vgpr12, implicit-def $vgpr20_vgpr21_vgpr22, implicit $vgpr3, implicit $vgpr4, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec :: (load (s128)) {
; CHECK-NEXT: S_CLAUSE 1
; CHECK-NEXT: $vgpr10_vgpr11_vgpr12 = IMAGE_SAMPLE_LZ_V3_V2_gfx12 $vgpr3, $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 1, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (load (s128))
; CHECK-NEXT: $vgpr20_vgpr21_vgpr22 = IMAGE_SAMPLE_LZ_V3_V2_gfx12 $vgpr3, $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 14, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (load (s128))
@@ -28,7 +28,7 @@ body: |
; CHECK-LABEL: name: mimg_mixed
; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: BUNDLE implicit-def $vgpr10, implicit-def $vgpr14, implicit-def $vgpr20_vgpr21_vgpr22, implicit $vgpr3, implicit $vgpr4, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec, implicit $vgpr5, implicit $vgpr6 {
+ ; CHECK-NEXT: BUNDLE implicit-def $vgpr10, implicit-def $vgpr14, implicit-def $vgpr20_vgpr21_vgpr22, implicit $vgpr3, implicit $vgpr4, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec, implicit $vgpr5, implicit $vgpr6 :: (load (s128)), (dereferenceable load (s128), addrspace 7) {
; CHECK-NEXT: S_CLAUSE 2
; CHECK-NEXT: $vgpr10 = IMAGE_SAMPLE_LZ_V1_V2_gfx12 $vgpr3, $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 1, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (load (s128))
; CHECK-NEXT: $vgpr14 = IMAGE_SAMPLE_LZ_V1_V2_gfx12 $vgpr5, $vgpr6, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 1, 1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), addrspace 7)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier-fastregalloc.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier-fastregalloc.ll
index 4719ab9..cbf697f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier-fastregalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier-fastregalloc.ll
@@ -1,13 +1,20 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -stop-after=postrapseudos -o - < %s | FileCheck -enable-var-scope -check-prefix=MIR %s
-; MIR-LABEL: name: gws_barrier_offset0{{$}}
-; MIR: BUNDLE implicit{{( killed)?( renamable)?}} $vgpr0, implicit $m0, implicit $exec {
-; MIR-NEXT: DS_GWS_BARRIER renamable $vgpr0, 0, implicit $m0, implicit $exec :: (load (s32) from custom "GWSResource")
-; MIR-NEXT: S_WAITCNT 0
-; MIR-NEXT: }
define amdgpu_kernel void @gws_barrier_offset0(i32 %val) #0 {
+ ; MIR-LABEL: name: gws_barrier_offset0
+ ; MIR: bb.0 (%ir-block.0):
+ ; MIR-NEXT: liveins: $sgpr8_sgpr9
+ ; MIR-NEXT: {{ $}}
+ ; MIR-NEXT: renamable $sgpr4 = S_LOAD_DWORD_IMM killed renamable $sgpr8_sgpr9, 0, 0 :: (dereferenceable invariant load (s32) from %ir.val.kernarg.offset, align 16, addrspace 4)
+ ; MIR-NEXT: $m0 = S_MOV_B32 0
+ ; MIR-NEXT: $vgpr0 = V_MOV_B32_e32 killed $sgpr4, implicit $exec, implicit $exec
+ ; MIR-NEXT: BUNDLE implicit killed renamable $vgpr0, implicit $m0, implicit $exec :: (load (s32) from custom "GWSResource") {
+ ; MIR-NEXT: DS_GWS_BARRIER renamable $vgpr0, 0, implicit $m0, implicit $exec :: (load (s32) from custom "GWSResource")
+ ; MIR-NEXT: S_WAITCNT 0
+ ; MIR-NEXT: }
+ ; MIR-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.ds.gws.barrier(i32 %val, i32 0)
ret void
}
@@ -17,5 +24,3 @@ declare void @llvm.amdgcn.ds.gws.barrier(i32, i32) #1
attributes #0 = { nounwind }
attributes #1 = { convergent inaccessiblememonly nounwind }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; MIR: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier.ll
index c5f6e2b..417b8e0 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ds.gws.barrier.ll
@@ -35,7 +35,7 @@
; LOOP-NEXT: s_cbranch_scc1 [[LOOP]]
; MIR-LABEL: name: gws_barrier_offset0{{$}}
-; MIR: BUNDLE implicit{{( killed)?( renamable)?}} $vgpr0, implicit $m0, implicit $exec {
+; MIR: BUNDLE implicit{{( killed)?( renamable)?}} $vgpr0, implicit $m0, implicit $exec
; MIR-NEXT: DS_GWS_BARRIER renamable $vgpr0, 0, implicit $m0, implicit $exec :: (load (s32) from custom "GWSResource")
; MIR-NEXT: S_WAITCNT 0
; MIR-NEXT: }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.bf16.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.bf16.bf16.ll
index 4419b8c..af270e5 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.bf16.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.bf16.bf16.ll
@@ -13,9 +13,9 @@ define amdgpu_kernel void @test_llvm_amdgcn_fdot2_bf16_bf16(
; SDAG-GFX11-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; SDAG-GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0
; SDAG-GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v1, s[6:7]
; SDAG-GFX11-TRUE16-NEXT: s_load_b32 s2, s[2:3], 0x0
; SDAG-GFX11-TRUE16-NEXT: s_load_b32 s3, s[4:5], 0x0
+; SDAG-GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v1, s[6:7]
; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; SDAG-GFX11-TRUE16-NEXT: v_dot2_bf16_bf16 v0.l, s2, s3, v0.l
; SDAG-GFX11-TRUE16-NEXT: global_store_b16 v1, v0, s[0:1]
@@ -26,9 +26,9 @@ define amdgpu_kernel void @test_llvm_amdgcn_fdot2_bf16_bf16(
; SDAG-GFX11-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; SDAG-GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, 0
; SDAG-GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX11-FAKE16-NEXT: global_load_u16 v1, v0, s[6:7]
; SDAG-GFX11-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
; SDAG-GFX11-FAKE16-NEXT: s_load_b32 s3, s[4:5], 0x0
+; SDAG-GFX11-FAKE16-NEXT: global_load_u16 v1, v0, s[6:7]
; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; SDAG-GFX11-FAKE16-NEXT: v_dot2_bf16_bf16 v1, s2, s3, v1
; SDAG-GFX11-FAKE16-NEXT: global_store_b16 v0, v1, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.f16.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.f16.f16.ll
index 0194d25..72b4769 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.f16.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fdot2.f16.f16.ll
@@ -12,9 +12,9 @@ define amdgpu_kernel void @test_llvm_amdgcn_fdot2_f16_f16(
; SDAG-GFX11-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; SDAG-GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0
; SDAG-GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v1, s[6:7]
; SDAG-GFX11-TRUE16-NEXT: s_load_b32 s2, s[2:3], 0x0
; SDAG-GFX11-TRUE16-NEXT: s_load_b32 s3, s[4:5], 0x0
+; SDAG-GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v1, s[6:7]
; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; SDAG-GFX11-TRUE16-NEXT: v_dot2_f16_f16 v0.l, s2, s3, v0.l
; SDAG-GFX11-TRUE16-NEXT: global_store_b16 v1, v0, s[0:1]
@@ -25,9 +25,9 @@ define amdgpu_kernel void @test_llvm_amdgcn_fdot2_f16_f16(
; SDAG-GFX11-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; SDAG-GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, 0
; SDAG-GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-GFX11-FAKE16-NEXT: global_load_u16 v1, v0, s[6:7]
; SDAG-GFX11-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
; SDAG-GFX11-FAKE16-NEXT: s_load_b32 s3, s[4:5], 0x0
+; SDAG-GFX11-FAKE16-NEXT: global_load_u16 v1, v0, s[6:7]
; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; SDAG-GFX11-FAKE16-NEXT: v_dot2_f16_f16 v1, s2, s3, v1
; SDAG-GFX11-FAKE16-NEXT: global_store_b16 v0, v1, s[0:1]
@@ -38,9 +38,9 @@ define amdgpu_kernel void @test_llvm_amdgcn_fdot2_f16_f16(
; GISEL-GFX11-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GISEL-GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0
; GISEL-GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v1, s[6:7]
; GISEL-GFX11-TRUE16-NEXT: s_load_b32 s2, s[2:3], 0x0
; GISEL-GFX11-TRUE16-NEXT: s_load_b32 s3, s[4:5], 0x0
+; GISEL-GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v1, s[6:7]
; GISEL-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GISEL-GFX11-TRUE16-NEXT: v_dot2_f16_f16 v0.l, s2, s3, v0.l
; GISEL-GFX11-TRUE16-NEXT: global_store_b16 v1, v0, s[0:1]
@@ -51,9 +51,9 @@ define amdgpu_kernel void @test_llvm_amdgcn_fdot2_f16_f16(
; GISEL-GFX11-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GISEL-GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, 0
; GISEL-GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX11-FAKE16-NEXT: global_load_u16 v1, v0, s[6:7]
; GISEL-GFX11-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
; GISEL-GFX11-FAKE16-NEXT: s_load_b32 s3, s[4:5], 0x0
+; GISEL-GFX11-FAKE16-NEXT: global_load_u16 v1, v0, s[6:7]
; GISEL-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GISEL-GFX11-FAKE16-NEXT: v_dot2_f16_f16 v1, s2, s3, v1
; GISEL-GFX11-FAKE16-NEXT: global_store_b16 v0, v1, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll
index 0c1448a..1d08097 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll
@@ -17,21 +17,19 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x64_f16__vgpr(ptr addrspace(1) %
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; SDAG-NEXT: v_mov_b32_e32 v4, 0
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: global_load_dwordx4 v[0:3], v0, s[6:7]
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
+; SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-NEXT: global_load_dwordx4 v[0:3], v0, s[6:7]
; SDAG-NEXT: v_mov_b64_e32 v[16:17], s[2:3]
; SDAG-NEXT: v_mov_b64_e32 v[14:15], s[0:1]
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
; SDAG-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
; SDAG-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
; SDAG-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
; SDAG-NEXT: v_mov_b32_e32 v5, s16
+; SDAG-NEXT: v_mov_b32_e32 v4, 0
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: s_nop 0
; SDAG-NEXT: v_smfmac_f32_16x16x64_f16 v[0:3], v[14:17], v[6:13], v5 cbsz:1 abid:2
; SDAG-NEXT: s_nop 7
; SDAG-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
@@ -43,13 +41,12 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x64_f16__vgpr(ptr addrspace(1) %
; GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
; GISEL-NEXT: s_load_dword s16, s[4:5], 0x64
+; GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[2:3]
; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[0:1]
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[12:13]
@@ -175,16 +172,15 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x32_f16__vgpr(ptr addrspace(1) %
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
+; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: global_load_dwordx4 v[12:15], v16, s[6:7] offset:48
; SDAG-NEXT: global_load_dwordx4 v[8:11], v16, s[6:7] offset:32
; SDAG-NEXT: global_load_dwordx4 v[4:7], v16, s[6:7] offset:16
; SDAG-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
-; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
-; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
; SDAG-NEXT: v_mov_b64_e32 v[28:29], s[2:3]
; SDAG-NEXT: v_mov_b64_e32 v[26:27], s[0:1]
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b64_e32 v[24:25], s[14:15]
; SDAG-NEXT: v_mov_b64_e32 v[22:23], s[12:13]
; SDAG-NEXT: v_mov_b64_e32 v[20:21], s[10:11]
@@ -207,16 +203,15 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x32_f16__vgpr(ptr addrspace(1) %
; GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
+; GISEL-NEXT: s_load_dword s16, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
; GISEL-NEXT: global_load_dwordx4 v[4:7], v16, s[6:7] offset:16
; GISEL-NEXT: global_load_dwordx4 v[8:11], v16, s[6:7] offset:32
; GISEL-NEXT: global_load_dwordx4 v[12:15], v16, s[6:7] offset:48
-; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
-; GISEL-NEXT: s_load_dword s16, s[4:5], 0x64
; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[2:3]
; GISEL-NEXT: v_mov_b64_e32 v[24:25], s[0:1]
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[22:23], s[14:15]
; GISEL-NEXT: v_mov_b64_e32 v[20:21], s[12:13]
; GISEL-NEXT: v_mov_b64_e32 v[18:19], s[10:11]
@@ -520,21 +515,19 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x64_bf16__vgpr(ptr addrspace(1)
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; GCN-NEXT: v_mov_b32_e32 v4, 0
-; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: global_load_dwordx4 v[0:3], v0, s[6:7]
; GCN-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
; GCN-NEXT: s_load_dword s16, s[4:5], 0x64
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[0:3], v0, s[6:7]
; GCN-NEXT: v_mov_b64_e32 v[16:17], s[2:3]
; GCN-NEXT: v_mov_b64_e32 v[14:15], s[0:1]
-; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b64_e32 v[6:7], s[8:9]
; GCN-NEXT: v_mov_b64_e32 v[8:9], s[10:11]
; GCN-NEXT: v_mov_b64_e32 v[10:11], s[12:13]
; GCN-NEXT: v_mov_b64_e32 v[12:13], s[14:15]
; GCN-NEXT: v_mov_b32_e32 v5, s16
+; GCN-NEXT: v_mov_b32_e32 v4, 0
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: s_nop 0
; GCN-NEXT: v_smfmac_f32_16x16x64_bf16 v[0:3], v[14:17], v[6:13], v5 cbsz:1 abid:2
; GCN-NEXT: s_nop 7
; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
@@ -634,16 +627,15 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x32_bf16__vgpr(ptr addrspace(1)
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
; GCN-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; GCN-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
+; GCN-NEXT: s_load_dword s16, s[4:5], 0x64
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_load_dwordx4 v[12:15], v16, s[6:7] offset:48
; GCN-NEXT: global_load_dwordx4 v[8:11], v16, s[6:7] offset:32
; GCN-NEXT: global_load_dwordx4 v[4:7], v16, s[6:7] offset:16
; GCN-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
-; GCN-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44
-; GCN-NEXT: s_load_dword s16, s[4:5], 0x64
; GCN-NEXT: v_mov_b64_e32 v[28:29], s[2:3]
; GCN-NEXT: v_mov_b64_e32 v[26:27], s[0:1]
-; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b64_e32 v[24:25], s[14:15]
; GCN-NEXT: v_mov_b64_e32 v[22:23], s[12:13]
; GCN-NEXT: v_mov_b64_e32 v[20:21], s[10:11]
@@ -802,11 +794,11 @@ define amdgpu_kernel void @test_smfmac_i32_16x16x128_i8__vgpr(ptr addrspace(1) %
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: v_mov_b32_e32 v16, 0
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
; SDAG-NEXT: v_mov_b32_e32 v12, s8
; SDAG-NEXT: v_mov_b32_e32 v13, s9
; SDAG-NEXT: v_mov_b32_e32 v14, s10
@@ -815,7 +807,6 @@ define amdgpu_kernel void @test_smfmac_i32_16x16x128_i8__vgpr(ptr addrspace(1) %
; SDAG-NEXT: v_mov_b32_e32 v1, s13
; SDAG-NEXT: v_mov_b32_e32 v2, s14
; SDAG-NEXT: v_mov_b32_e32 v3, s15
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v4, s0
; SDAG-NEXT: v_mov_b32_e32 v5, s1
; SDAG-NEXT: v_mov_b32_e32 v6, s2
@@ -833,12 +824,11 @@ define amdgpu_kernel void @test_smfmac_i32_16x16x128_i8__vgpr(ptr addrspace(1) %
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
@@ -965,15 +955,14 @@ define amdgpu_kernel void @test_smfmac_i32_32x32x64_i8__vgpr(ptr addrspace(1) %a
; SDAG-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
+; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: global_load_dwordx4 v[12:15], v16, s[6:7] offset:48
; SDAG-NEXT: global_load_dwordx4 v[8:11], v16, s[6:7] offset:32
; SDAG-NEXT: global_load_dwordx4 v[4:7], v16, s[6:7] offset:16
; SDAG-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
-; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
-; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v24, s8
; SDAG-NEXT: v_mov_b32_e32 v25, s9
; SDAG-NEXT: v_mov_b32_e32 v26, s10
@@ -1003,15 +992,14 @@ define amdgpu_kernel void @test_smfmac_i32_32x32x64_i8__vgpr(ptr addrspace(1) %a
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
+; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: global_load_dwordx4 v[0:3], v16, s[0:1]
; GISEL-NEXT: global_load_dwordx4 v[4:7], v16, s[0:1] offset:16
; GISEL-NEXT: global_load_dwordx4 v[8:11], v16, s[0:1] offset:32
; GISEL-NEXT: global_load_dwordx4 v[12:15], v16, s[0:1] offset:48
-; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
-; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[24:25], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[22:23], s[18:19]
@@ -1317,11 +1305,11 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_bf8_bf8__vgpr(ptr addrspace
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: v_mov_b32_e32 v16, 0
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
; SDAG-NEXT: v_mov_b32_e32 v12, s8
; SDAG-NEXT: v_mov_b32_e32 v13, s9
; SDAG-NEXT: v_mov_b32_e32 v14, s10
@@ -1330,7 +1318,6 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_bf8_bf8__vgpr(ptr addrspace
; SDAG-NEXT: v_mov_b32_e32 v1, s13
; SDAG-NEXT: v_mov_b32_e32 v2, s14
; SDAG-NEXT: v_mov_b32_e32 v3, s15
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v4, s0
; SDAG-NEXT: v_mov_b32_e32 v5, s1
; SDAG-NEXT: v_mov_b32_e32 v6, s2
@@ -1348,12 +1335,11 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_bf8_bf8__vgpr(ptr addrspace
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
@@ -1481,11 +1467,11 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_bf8_fp8__vgpr(ptr addrspace
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: v_mov_b32_e32 v16, 0
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
; SDAG-NEXT: v_mov_b32_e32 v12, s8
; SDAG-NEXT: v_mov_b32_e32 v13, s9
; SDAG-NEXT: v_mov_b32_e32 v14, s10
@@ -1494,7 +1480,6 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_bf8_fp8__vgpr(ptr addrspace
; SDAG-NEXT: v_mov_b32_e32 v1, s13
; SDAG-NEXT: v_mov_b32_e32 v2, s14
; SDAG-NEXT: v_mov_b32_e32 v3, s15
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v4, s0
; SDAG-NEXT: v_mov_b32_e32 v5, s1
; SDAG-NEXT: v_mov_b32_e32 v6, s2
@@ -1512,12 +1497,11 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_bf8_fp8__vgpr(ptr addrspace
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
@@ -1645,11 +1629,11 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_fp8_bf8__vgpr(ptr addrspace
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: v_mov_b32_e32 v16, 0
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
; SDAG-NEXT: v_mov_b32_e32 v12, s8
; SDAG-NEXT: v_mov_b32_e32 v13, s9
; SDAG-NEXT: v_mov_b32_e32 v14, s10
@@ -1658,7 +1642,6 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_fp8_bf8__vgpr(ptr addrspace
; SDAG-NEXT: v_mov_b32_e32 v1, s13
; SDAG-NEXT: v_mov_b32_e32 v2, s14
; SDAG-NEXT: v_mov_b32_e32 v3, s15
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v4, s0
; SDAG-NEXT: v_mov_b32_e32 v5, s1
; SDAG-NEXT: v_mov_b32_e32 v6, s2
@@ -1676,12 +1659,11 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_fp8_bf8__vgpr(ptr addrspace
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
@@ -1809,11 +1791,11 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_fp8_fp8__vgpr(ptr addrspace
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: v_mov_b32_e32 v16, 0
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
+; SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; SDAG-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7]
+; SDAG-NEXT: v_mov_b32_e32 v16, 0
; SDAG-NEXT: v_mov_b32_e32 v12, s8
; SDAG-NEXT: v_mov_b32_e32 v13, s9
; SDAG-NEXT: v_mov_b32_e32 v14, s10
@@ -1822,7 +1804,6 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_fp8_fp8__vgpr(ptr addrspace
; SDAG-NEXT: v_mov_b32_e32 v1, s13
; SDAG-NEXT: v_mov_b32_e32 v2, s14
; SDAG-NEXT: v_mov_b32_e32 v3, s15
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v4, s0
; SDAG-NEXT: v_mov_b32_e32 v5, s1
; SDAG-NEXT: v_mov_b32_e32 v6, s2
@@ -1840,12 +1821,11 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_fp8_fp8__vgpr(ptr addrspace
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1]
; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
@@ -1972,15 +1952,14 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x64_bf8_bf8__vgpr(ptr addrspace(
; SDAG-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
+; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: global_load_dwordx4 v[12:15], v16, s[6:7] offset:48
; SDAG-NEXT: global_load_dwordx4 v[8:11], v16, s[6:7] offset:32
; SDAG-NEXT: global_load_dwordx4 v[4:7], v16, s[6:7] offset:16
; SDAG-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
-; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
-; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v24, s8
; SDAG-NEXT: v_mov_b32_e32 v25, s9
; SDAG-NEXT: v_mov_b32_e32 v26, s10
@@ -2010,15 +1989,14 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x64_bf8_bf8__vgpr(ptr addrspace(
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
+; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: global_load_dwordx4 v[0:3], v16, s[0:1]
; GISEL-NEXT: global_load_dwordx4 v[4:7], v16, s[0:1] offset:16
; GISEL-NEXT: global_load_dwordx4 v[8:11], v16, s[0:1] offset:32
; GISEL-NEXT: global_load_dwordx4 v[12:15], v16, s[0:1] offset:48
-; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
-; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[24:25], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[22:23], s[18:19]
@@ -2323,15 +2301,14 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x64_bf8_fp8__vgpr(ptr addrspace(
; SDAG-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
+; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: global_load_dwordx4 v[12:15], v16, s[6:7] offset:48
; SDAG-NEXT: global_load_dwordx4 v[8:11], v16, s[6:7] offset:32
; SDAG-NEXT: global_load_dwordx4 v[4:7], v16, s[6:7] offset:16
; SDAG-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
-; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
-; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v24, s8
; SDAG-NEXT: v_mov_b32_e32 v25, s9
; SDAG-NEXT: v_mov_b32_e32 v26, s10
@@ -2361,15 +2338,14 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x64_bf8_fp8__vgpr(ptr addrspace(
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
+; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: global_load_dwordx4 v[0:3], v16, s[0:1]
; GISEL-NEXT: global_load_dwordx4 v[4:7], v16, s[0:1] offset:16
; GISEL-NEXT: global_load_dwordx4 v[8:11], v16, s[0:1] offset:32
; GISEL-NEXT: global_load_dwordx4 v[12:15], v16, s[0:1] offset:48
-; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
-; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[24:25], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[22:23], s[18:19]
@@ -2674,15 +2650,14 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x64_fp8_bf8__vgpr(ptr addrspace(
; SDAG-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
+; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: global_load_dwordx4 v[12:15], v16, s[6:7] offset:48
; SDAG-NEXT: global_load_dwordx4 v[8:11], v16, s[6:7] offset:32
; SDAG-NEXT: global_load_dwordx4 v[4:7], v16, s[6:7] offset:16
; SDAG-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
-; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
-; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v24, s8
; SDAG-NEXT: v_mov_b32_e32 v25, s9
; SDAG-NEXT: v_mov_b32_e32 v26, s10
@@ -2712,15 +2687,14 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x64_fp8_bf8__vgpr(ptr addrspace(
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
+; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: global_load_dwordx4 v[0:3], v16, s[0:1]
; GISEL-NEXT: global_load_dwordx4 v[4:7], v16, s[0:1] offset:16
; GISEL-NEXT: global_load_dwordx4 v[8:11], v16, s[0:1] offset:32
; GISEL-NEXT: global_load_dwordx4 v[12:15], v16, s[0:1] offset:48
-; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
-; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[24:25], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[22:23], s[18:19]
@@ -3025,15 +2999,14 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x64_fp8_fp8__vgpr(ptr addrspace(
; SDAG-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
; SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; SDAG-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
+; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: global_load_dwordx4 v[12:15], v16, s[6:7] offset:48
; SDAG-NEXT: global_load_dwordx4 v[8:11], v16, s[6:7] offset:32
; SDAG-NEXT: global_load_dwordx4 v[4:7], v16, s[6:7] offset:16
; SDAG-NEXT: global_load_dwordx4 v[0:3], v16, s[6:7]
-; SDAG-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; SDAG-NEXT: s_load_dword s16, s[4:5], 0x64
-; SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x54
-; SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v24, s8
; SDAG-NEXT: v_mov_b32_e32 v25, s9
; SDAG-NEXT: v_mov_b32_e32 v26, s10
@@ -3063,15 +3036,14 @@ define amdgpu_kernel void @test_smfmac_f32_32x32x64_fp8_fp8__vgpr(ptr addrspace(
; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v16, 6, v0
+; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
+; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
+; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: global_load_dwordx4 v[0:3], v16, s[0:1]
; GISEL-NEXT: global_load_dwordx4 v[4:7], v16, s[0:1] offset:16
; GISEL-NEXT: global_load_dwordx4 v[8:11], v16, s[0:1] offset:32
; GISEL-NEXT: global_load_dwordx4 v[12:15], v16, s[0:1] offset:48
-; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
-; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54
-; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64
-; GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[10:11]
; GISEL-NEXT: v_mov_b64_e32 v[24:25], s[8:9]
; GISEL-NEXT: v_mov_b64_e32 v[22:23], s[18:19]
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
index f93e5f0..83c240c 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
@@ -10386,7 +10386,8 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
; GFX8-NEXT: s_add_u32 s2, s0, 0x150
; GFX8-NEXT: s_addc_u32 s3, s1, 0
; GFX8-NEXT: flat_store_dwordx4 v[44:45], v[12:15]
-; GFX8-NEXT: flat_store_dwordx4 v[42:43], v[4:7]
+; GFX8-NEXT: flat_store_dwordx4 v[48:49], v[8:11]
+; GFX8-NEXT: flat_store_dwordx4 v[50:51], v[16:19]
; GFX8-NEXT: v_mov_b32_e32 v13, s3
; GFX8-NEXT: v_mov_b32_e32 v12, s2
; GFX8-NEXT: s_add_u32 s2, s0, 0x140
@@ -10395,10 +10396,6 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
; GFX8-NEXT: v_mov_b32_e32 v14, s2
; GFX8-NEXT: s_add_u32 s2, s0, 0x130
; GFX8-NEXT: s_addc_u32 s3, s1, 0
-; GFX8-NEXT: flat_store_dwordx4 v[46:47], v[0:3]
-; GFX8-NEXT: flat_store_dwordx4 v[48:49], v[8:11]
-; GFX8-NEXT: flat_store_dwordx4 v[50:51], v[16:19]
-; GFX8-NEXT: v_mov_b32_e32 v4, s6
; GFX8-NEXT: v_mov_b32_e32 v17, s3
; GFX8-NEXT: v_mov_b32_e32 v16, s2
; GFX8-NEXT: s_add_u32 s2, s0, 0x120
@@ -10406,20 +10403,21 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
; GFX8-NEXT: v_mov_b32_e32 v19, s3
; GFX8-NEXT: v_mov_b32_e32 v18, s2
; GFX8-NEXT: s_add_u32 s2, s0, 0x110
-; GFX8-NEXT: v_mov_b32_e32 v5, s7
+; GFX8-NEXT: flat_store_dwordx4 v[42:43], v[4:7]
; GFX8-NEXT: s_addc_u32 s3, s1, 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NEXT: v_mov_b32_e32 v5, s7
; GFX8-NEXT: v_mov_b32_e32 v42, vcc_lo
; GFX8-NEXT: v_mov_b32_e32 v43, vcc_hi
; GFX8-NEXT: v_mov_b32_e32 v6, s4
; GFX8-NEXT: v_mov_b32_e32 v7, s5
+; GFX8-NEXT: flat_store_dwordx4 v[46:47], v[0:3]
+; GFX8-NEXT: v_mov_b32_e32 v8, s12
; GFX8-NEXT: v_mov_b32_e32 v0, s8
; GFX8-NEXT: v_mov_b32_e32 v1, s9
-; GFX8-NEXT: v_mov_b32_e32 v8, s12
-; GFX8-NEXT: flat_store_dwordx4 v[52:53], v[20:23]
; GFX8-NEXT: v_mov_b32_e32 v2, s10
; GFX8-NEXT: v_mov_b32_e32 v3, s11
; GFX8-NEXT: v_mov_b32_e32 v9, s13
-; GFX8-NEXT: flat_store_dwordx4 v[54:55], v[24:27]
; GFX8-NEXT: v_mov_b32_e32 v10, s14
; GFX8-NEXT: v_mov_b32_e32 v11, s15
; GFX8-NEXT: flat_store_dwordx4 v[56:57], v[28:31]
@@ -10588,6 +10586,8 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_store_dwordx4 v[52:53], v[20:23]
+; GFX8-NEXT: flat_store_dwordx4 v[54:55], v[24:27]
; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index bca39d0..59f4a9d 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -4582,18 +4582,18 @@ define amdgpu_kernel void @global_sextload_v64i16_to_v64i32(ptr addrspace(1) %ou
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x90
-; GCN-HSA-NEXT: flat_store_dwordx4 v[32:33], v[12:15]
-; GCN-HSA-NEXT: flat_store_dwordx4 v[38:39], v[20:23]
; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
; GCN-HSA-NEXT: v_mov_b32_e32 v0, s2
; GCN-HSA-NEXT: v_mov_b32_e32 v1, s3
; GCN-HSA-NEXT: s_add_u32 s2, s0, 0x60
+; GCN-HSA-NEXT: flat_store_dwordx4 v[32:33], v[12:15]
+; GCN-HSA-NEXT: flat_store_dwordx4 v[38:39], v[20:23]
+; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
; GCN-HSA-NEXT: v_ashrrev_i32_e32 v23, 16, v3
; GCN-HSA-NEXT: v_ashrrev_i32_e32 v21, 16, v2
; GCN-HSA-NEXT: v_bfe_i32 v22, v3, 0, 16
; GCN-HSA-NEXT: v_bfe_i32 v20, v2, 0, 16
-; GCN-HSA-NEXT: s_addc_u32 s3, s1, 0
; GCN-HSA-NEXT: flat_store_dwordx4 v[0:1], v[20:23]
; GCN-HSA-NEXT: s_waitcnt vmcnt(11)
; GCN-HSA-NEXT: v_ashrrev_i32_e32 v3, 16, v9
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll
index e55fb2ca..7203545 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll
@@ -3313,12 +3313,12 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou
; GCNX3-HSA-NEXT: v_mov_b32_e32 v4, s2
; GCNX3-HSA-NEXT: s_add_u32 s2, s0, 0x90
; GCNX3-HSA-NEXT: s_addc_u32 s3, s1, 0
-; GCNX3-HSA-NEXT: flat_store_dwordx4 v[32:33], v[24:27]
-; GCNX3-HSA-NEXT: flat_store_dwordx4 v[38:39], v[20:23]
; GCNX3-HSA-NEXT: flat_store_dwordx4 v[4:5], v[12:15]
; GCNX3-HSA-NEXT: v_mov_b32_e32 v5, s3
; GCNX3-HSA-NEXT: v_mov_b32_e32 v4, s2
; GCNX3-HSA-NEXT: s_add_u32 s2, s0, 0x60
+; GCNX3-HSA-NEXT: flat_store_dwordx4 v[32:33], v[24:27]
+; GCNX3-HSA-NEXT: flat_store_dwordx4 v[38:39], v[20:23]
; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v26, 31, v7
; GCNX3-HSA-NEXT: v_ashrrev_i32_e32 v24, 31, v6
; GCNX3-HSA-NEXT: v_mov_b32_e32 v23, v6
@@ -3726,7 +3726,6 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou
; GCN-GFX900-HSA-NEXT: s_nop 0
; GCN-GFX900-HSA-NEXT: global_store_dwordx4 v12, v[37:40], s[0:1] offset:224
; GCN-GFX900-HSA-NEXT: global_store_dwordx4 v12, v[33:36], s[0:1] offset:240
-; GCN-GFX900-HSA-NEXT: global_store_dwordx4 v12, v[8:11], s[0:1] offset:192
; GCN-GFX900-HSA-NEXT: buffer_load_dword v33, off, s[20:23], 0 ; 4-byte Folded Reload
; GCN-GFX900-HSA-NEXT: s_nop 0
; GCN-GFX900-HSA-NEXT: buffer_load_dword v34, off, s[20:23], 0 offset:4 ; 4-byte Folded Reload
@@ -3740,7 +3739,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou
; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v43, v26
; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v29, v27
; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v31, v28
-; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(12)
+; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(11)
; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v60, 31, v3
; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v58, 31, v2
; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v28, 31, v1
@@ -3749,6 +3748,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou
; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v27, v1
; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v57, v2
; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v59, v3
+; GCN-GFX900-HSA-NEXT: global_store_dwordx4 v12, v[8:11], s[0:1] offset:192
; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(7)
; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v3, 31, v24
; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v1, 31, v23
@@ -3758,7 +3758,7 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou
; GCN-GFX900-HSA-NEXT: v_ashrrev_i32_e32 v9, 31, v21
; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v8, v21
; GCN-GFX900-HSA-NEXT: v_mov_b32_e32 v10, v22
-; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(0)
+; GCN-GFX900-HSA-NEXT: s_waitcnt vmcnt(1)
; GCN-GFX900-HSA-NEXT: global_store_dwordx4 v12, v[33:36], s[0:1] offset:208
; GCN-GFX900-HSA-NEXT: global_store_dwordx4 v12, v[41:44], s[0:1] offset:160
; GCN-GFX900-HSA-NEXT: global_store_dwordx4 v12, v[29:32], s[0:1] offset:176
diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
index f879dc6..cb17f01 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i8.ll
@@ -7788,19 +7788,18 @@ define amdgpu_kernel void @global_zextload_v32i8_to_v32i64(ptr addrspace(1) %out
; GCN-NOHSA-VI-NEXT: v_bfe_u32 v29, v13, 16, 8
; GCN-NOHSA-VI-NEXT: v_bfe_u32 v35, v14, 8, 8
; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v33, 0xff, v14
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, v53
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, v53
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[36:39], off, s[0:3], 0 offset:144
; GCN-NOHSA-VI-NEXT: v_bfe_u32 v16, v17, 8, 8
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v36, v53
; GCN-NOHSA-VI-NEXT: v_and_b32_e32 v14, 0xff, v17
; GCN-NOHSA-VI-NEXT: v_lshrrev_b32_e32 v54, 24, v17
; GCN-NOHSA-VI-NEXT: v_bfe_u32 v52, v17, 16, 8
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v11, v53
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v13, v53
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, v53
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, v53
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[36:39], off, s[0:3], 0 offset:144
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[29:32], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v36, v53
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[10:13], off, s[0:3], 0 offset:48
-; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:224
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v15, v53
+; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v17, v53
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v29, v53
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v10, v53
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[33:36], off, s[0:3], 0 offset:128
@@ -7810,7 +7809,7 @@ define amdgpu_kernel void @global_zextload_v32i8_to_v32i64(ptr addrspace(1) %out
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:64
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[7:10], off, s[0:3], 0 offset:32
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[3:6], off, s[0:3], 0 offset:16
-; GCN-NOHSA-VI-NEXT: s_nop 0
+; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:224
; GCN-NOHSA-VI-NEXT: v_mov_b32_e32 v3, v53
; GCN-NOHSA-VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GCN-NOHSA-VI-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
index bd191a3..062a985 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
@@ -3172,27 +3172,25 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-NO-DS128-LABEL: local_zextload_v64i16_to_v64i32:
; VI-NO-DS128: ; %bb.0:
; VI-NO-DS128-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; VI-NO-DS128-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
; VI-NO-DS128-NEXT: s_mov_b32 m0, -1
+; VI-NO-DS128-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
; VI-NO-DS128-NEXT: s_mov_b32 s89, SCRATCH_RSRC_DWORD1
; VI-NO-DS128-NEXT: s_mov_b32 s90, -1
; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(0)
; VI-NO-DS128-NEXT: v_mov_b32_e32 v16, s1
; VI-NO-DS128-NEXT: ds_read2_b64 v[10:13], v16 offset1:1
; VI-NO-DS128-NEXT: ds_read2_b64 v[17:20], v16 offset0:2 offset1:3
+; VI-NO-DS128-NEXT: ds_read2_b64 v[21:24], v16 offset0:4 offset1:5
; VI-NO-DS128-NEXT: s_mov_b32 s91, 0xe80000
; VI-NO-DS128-NEXT: s_add_u32 s88, s88, s11
-; VI-NO-DS128-NEXT: s_addc_u32 s89, s89, 0
-; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(2)
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v1, 16, v11
-; VI-NO-DS128-NEXT: v_and_b32_e32 v0, 0xffff, v11
-; VI-NO-DS128-NEXT: buffer_store_dword v0, off, s[88:91], 0 ; 4-byte Folded Spill
-; VI-NO-DS128-NEXT: buffer_store_dword v1, off, s[88:91], 0 offset:4 ; 4-byte Folded Spill
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v10
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v5, 16, v13
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v7, 16, v12
-; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v9, 16, v18
+; VI-NO-DS128-NEXT: v_and_b32_e32 v0, 0xffff, v11
; VI-NO-DS128-NEXT: v_and_b32_e32 v2, 0xffff, v10
; VI-NO-DS128-NEXT: v_and_b32_e32 v4, 0xffff, v13
; VI-NO-DS128-NEXT: v_and_b32_e32 v6, 0xffff, v12
@@ -3200,7 +3198,6 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v13, 16, v20
; VI-NO-DS128-NEXT: v_and_b32_e32 v8, 0xffff, v18
; VI-NO-DS128-NEXT: v_and_b32_e32 v10, 0xffff, v17
-; VI-NO-DS128-NEXT: ds_read2_b64 v[21:24], v16 offset0:4 offset1:5
; VI-NO-DS128-NEXT: v_and_b32_e32 v12, 0xffff, v20
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v15, 16, v19
; VI-NO-DS128-NEXT: v_and_b32_e32 v14, 0xffff, v19
@@ -3243,17 +3240,19 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v56, 16, v19
; VI-NO-DS128-NEXT: v_and_b32_e32 v55, 0xffff, v19
; VI-NO-DS128-NEXT: ds_read2_b64 v[16:19], v16 offset0:14 offset1:15
+; VI-NO-DS128-NEXT: s_addc_u32 s89, s89, 0
+; VI-NO-DS128-NEXT: buffer_store_dword v0, off, s[88:91], 0 ; 4-byte Folded Spill
+; VI-NO-DS128-NEXT: buffer_store_dword v1, off, s[88:91], 0 offset:4 ; 4-byte Folded Spill
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v54, 16, v20
; VI-NO-DS128-NEXT: v_and_b32_e32 v53, 0xffff, v20
-; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
-; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v58, 16, v22
-; VI-NO-DS128-NEXT: v_and_b32_e32 v57, 0xffff, v22
; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(0)
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v20, 16, v19
; VI-NO-DS128-NEXT: v_and_b32_e32 v19, 0xffff, v19
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v1, 16, v18
; VI-NO-DS128-NEXT: v_and_b32_e32 v0, 0xffff, v18
; VI-NO-DS128-NEXT: v_mov_b32_e32 v18, s0
+; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v58, 16, v22
+; VI-NO-DS128-NEXT: v_and_b32_e32 v57, 0xffff, v22
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v22, 16, v21
; VI-NO-DS128-NEXT: v_and_b32_e32 v21, 0xffff, v21
; VI-NO-DS128-NEXT: v_lshrrev_b32_e32 v60, 16, v24
@@ -3296,21 +3295,17 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-NO-DS128-NEXT: v_mov_b32_e32 v56, s1
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[10:13], v56 offset1:1
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[14:17], v56 offset0:2 offset1:3
-; GFX9-NO-DS128-NEXT: s_add_u32 s12, s12, s11
-; GFX9-NO-DS128-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
-; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v1, 16, v11
-; GFX9-NO-DS128-NEXT: v_and_b32_e32 v0, 0xffff, v11
-; GFX9-NO-DS128-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; 4-byte Folded Spill
-; GFX9-NO-DS128-NEXT: s_nop 0
-; GFX9-NO-DS128-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[18:21], v56 offset0:4 offset1:5
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[22:25], v56 offset0:6 offset1:7
+; GFX9-NO-DS128-NEXT: s_add_u32 s12, s12, s11
+; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(3)
+; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v1, 16, v11
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v10
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v5, 16, v13
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v7, 16, v12
; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(2)
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v9, 16, v15
+; GFX9-NO-DS128-NEXT: v_and_b32_e32 v0, 0xffff, v11
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v2, 0xffff, v10
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v4, 0xffff, v13
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v6, 0xffff, v12
@@ -3337,9 +3332,11 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v36, 0xffff, v22
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[16:19], v56 offset0:8 offset1:9
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[20:23], v56 offset0:10 offset1:11
+; GFX9-NO-DS128-NEXT: s_addc_u32 s13, s13, 0
+; GFX9-NO-DS128-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; 4-byte Folded Spill
+; GFX9-NO-DS128-NEXT: s_nop 0
+; GFX9-NO-DS128-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GFX9-NO-DS128-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v39, 16, v25
-; GFX9-NO-DS128-NEXT: v_and_b32_e32 v38, 0xffff, v25
; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v41, 16, v17
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v40, 0xffff, v17
@@ -3360,16 +3357,17 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v55, 16, v22
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v54, 0xffff, v22
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[20:23], v56 offset0:14 offset1:15
+; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v39, 16, v25
+; GFX9-NO-DS128-NEXT: v_and_b32_e32 v38, 0xffff, v25
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v25, 16, v24
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
-; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v57, 16, v17
-; GFX9-NO-DS128-NEXT: v_and_b32_e32 v56, 0xffff, v17
; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v63, 16, v23
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v62, 0xffff, v23
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v23, 16, v22
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v57, 16, v17
+; GFX9-NO-DS128-NEXT: v_and_b32_e32 v56, 0xffff, v17
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v17, 16, v16
; GFX9-NO-DS128-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX9-NO-DS128-NEXT: v_lshrrev_b32_e32 v59, 16, v19
@@ -3806,9 +3804,11 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-DS128-NEXT: ds_read_b128 v[16:19], v0 offset:16
; VI-DS128-NEXT: s_mov_b32 s91, 0xe80000
; VI-DS128-NEXT: s_add_u32 s88, s88, s11
-; VI-DS128-NEXT: s_addc_u32 s89, s89, 0
-; VI-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; VI-DS128-NEXT: ds_read_b128 v[20:23], v0 offset:32
+; VI-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:48
+; VI-DS128-NEXT: s_waitcnt lgkmcnt(3)
; VI-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v11
+; VI-DS128-NEXT: s_addc_u32 s89, s89, 0
; VI-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v10
; VI-DS128-NEXT: v_mov_b32_e32 v4, v3
; VI-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v11
@@ -3825,23 +3825,16 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-DS128-NEXT: buffer_store_dword v5, off, s[88:91], 0 offset:20 ; 4-byte Folded Spill
; VI-DS128-NEXT: buffer_store_dword v6, off, s[88:91], 0 offset:24 ; 4-byte Folded Spill
; VI-DS128-NEXT: buffer_store_dword v7, off, s[88:91], 0 offset:28 ; 4-byte Folded Spill
-; VI-DS128-NEXT: s_waitcnt lgkmcnt(0)
+; VI-DS128-NEXT: s_waitcnt lgkmcnt(2)
; VI-DS128-NEXT: v_lshrrev_b32_e32 v4, 16, v19
; VI-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; VI-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v19
-; VI-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v18
-; VI-DS128-NEXT: ds_read_b128 v[20:23], v0 offset:32
-; VI-DS128-NEXT: buffer_store_dword v1, off, s[88:91], 0 offset:32 ; 4-byte Folded Spill
-; VI-DS128-NEXT: buffer_store_dword v2, off, s[88:91], 0 offset:36 ; 4-byte Folded Spill
-; VI-DS128-NEXT: buffer_store_dword v3, off, s[88:91], 0 offset:40 ; 4-byte Folded Spill
-; VI-DS128-NEXT: buffer_store_dword v4, off, s[88:91], 0 offset:44 ; 4-byte Folded Spill
-; VI-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:48
-; VI-DS128-NEXT: ds_read_b128 v[36:39], v0 offset:64
; VI-DS128-NEXT: v_lshrrev_b32_e32 v15, 16, v17
; VI-DS128-NEXT: v_lshrrev_b32_e32 v13, 16, v16
+; VI-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v19
+; VI-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v18
; VI-DS128-NEXT: v_and_b32_e32 v14, 0xffff, v17
; VI-DS128-NEXT: v_and_b32_e32 v12, 0xffff, v16
-; VI-DS128-NEXT: s_waitcnt lgkmcnt(2)
+; VI-DS128-NEXT: s_waitcnt lgkmcnt(1)
; VI-DS128-NEXT: v_lshrrev_b32_e32 v19, 16, v23
; VI-DS128-NEXT: v_lshrrev_b32_e32 v17, 16, v22
; VI-DS128-NEXT: v_lshrrev_b32_e32 v31, 16, v21
@@ -3850,21 +3843,25 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-DS128-NEXT: v_and_b32_e32 v16, 0xffff, v22
; VI-DS128-NEXT: v_and_b32_e32 v30, 0xffff, v21
; VI-DS128-NEXT: v_and_b32_e32 v28, 0xffff, v20
-; VI-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; VI-DS128-NEXT: s_waitcnt lgkmcnt(0)
; VI-DS128-NEXT: v_lshrrev_b32_e32 v23, 16, v27
; VI-DS128-NEXT: v_lshrrev_b32_e32 v21, 16, v26
; VI-DS128-NEXT: v_lshrrev_b32_e32 v35, 16, v25
; VI-DS128-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; VI-DS128-NEXT: v_and_b32_e32 v22, 0xffff, v27
+; VI-DS128-NEXT: ds_read_b128 v[36:39], v0 offset:64
; VI-DS128-NEXT: v_and_b32_e32 v20, 0xffff, v26
; VI-DS128-NEXT: v_and_b32_e32 v34, 0xffff, v25
; VI-DS128-NEXT: v_and_b32_e32 v32, 0xffff, v24
; VI-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:80
; VI-DS128-NEXT: ds_read_b128 v[55:58], v0 offset:96
+; VI-DS128-NEXT: buffer_store_dword v1, off, s[88:91], 0 offset:32 ; 4-byte Folded Spill
+; VI-DS128-NEXT: buffer_store_dword v2, off, s[88:91], 0 offset:36 ; 4-byte Folded Spill
+; VI-DS128-NEXT: buffer_store_dword v3, off, s[88:91], 0 offset:40 ; 4-byte Folded Spill
+; VI-DS128-NEXT: buffer_store_dword v4, off, s[88:91], 0 offset:44 ; 4-byte Folded Spill
; VI-DS128-NEXT: s_waitcnt lgkmcnt(2)
; VI-DS128-NEXT: v_lshrrev_b32_e32 v42, 16, v39
; VI-DS128-NEXT: v_lshrrev_b32_e32 v40, 16, v38
-; VI-DS128-NEXT: v_lshrrev_b32_e32 v46, 16, v37
; VI-DS128-NEXT: s_waitcnt lgkmcnt(1)
; VI-DS128-NEXT: v_lshrrev_b32_e32 v50, 16, v27
; VI-DS128-NEXT: v_lshrrev_b32_e32 v48, 16, v26
@@ -3875,16 +3872,17 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-DS128-NEXT: v_and_b32_e32 v53, 0xffff, v25
; VI-DS128-NEXT: v_and_b32_e32 v51, 0xffff, v24
; VI-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:112
+; VI-DS128-NEXT: v_lshrrev_b32_e32 v46, 16, v37
; VI-DS128-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; VI-DS128-NEXT: v_and_b32_e32 v41, 0xffff, v39
; VI-DS128-NEXT: v_and_b32_e32 v39, 0xffff, v38
-; VI-DS128-NEXT: v_and_b32_e32 v45, 0xffff, v37
; VI-DS128-NEXT: s_waitcnt lgkmcnt(0)
; VI-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v25
; VI-DS128-NEXT: v_lshrrev_b32_e32 v1, 16, v24
; VI-DS128-NEXT: v_and_b32_e32 v2, 0xffff, v25
; VI-DS128-NEXT: v_and_b32_e32 v0, 0xffff, v24
; VI-DS128-NEXT: v_mov_b32_e32 v24, s0
+; VI-DS128-NEXT: v_and_b32_e32 v45, 0xffff, v37
; VI-DS128-NEXT: v_and_b32_e32 v43, 0xffff, v36
; VI-DS128-NEXT: v_lshrrev_b32_e32 v61, 16, v58
; VI-DS128-NEXT: v_lshrrev_b32_e32 v59, 16, v57
@@ -3943,9 +3941,11 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-DS128-NEXT: ds_read_b128 v[8:11], v0
; GFX9-DS128-NEXT: ds_read_b128 v[16:19], v0 offset:16
; GFX9-DS128-NEXT: s_add_u32 s12, s12, s11
-; GFX9-DS128-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; GFX9-DS128-NEXT: ds_read_b128 v[20:23], v0 offset:32
+; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:48
+; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(3)
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v11
+; GFX9-DS128-NEXT: s_addc_u32 s13, s13, 0
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v10
; GFX9-DS128-NEXT: v_mov_b32_e32 v4, v3
; GFX9-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v11
@@ -3964,24 +3964,16 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-DS128-NEXT: buffer_store_dword v5, off, s[12:15], 0 offset:20 ; 4-byte Folded Spill
; GFX9-DS128-NEXT: buffer_store_dword v6, off, s[12:15], 0 offset:24 ; 4-byte Folded Spill
; GFX9-DS128-NEXT: buffer_store_dword v7, off, s[12:15], 0 offset:28 ; 4-byte Folded Spill
-; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2)
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v4, 16, v19
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v18
-; GFX9-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v19
-; GFX9-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v18
-; GFX9-DS128-NEXT: ds_read_b128 v[20:23], v0 offset:32
-; GFX9-DS128-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:32 ; 4-byte Folded Spill
-; GFX9-DS128-NEXT: s_nop 0
-; GFX9-DS128-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:36 ; 4-byte Folded Spill
-; GFX9-DS128-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:40 ; 4-byte Folded Spill
-; GFX9-DS128-NEXT: buffer_store_dword v4, off, s[12:15], 0 offset:44 ; 4-byte Folded Spill
-; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:48
-; GFX9-DS128-NEXT: ds_read_b128 v[36:39], v0 offset:64
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v15, 16, v17
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v13, 16, v16
+; GFX9-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v19
+; GFX9-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v18
; GFX9-DS128-NEXT: v_and_b32_e32 v14, 0xffff, v17
; GFX9-DS128-NEXT: v_and_b32_e32 v12, 0xffff, v16
-; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2)
+; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1)
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v19, 16, v23
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v17, 16, v22
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v31, 16, v21
@@ -3990,21 +3982,26 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-DS128-NEXT: v_and_b32_e32 v16, 0xffff, v22
; GFX9-DS128-NEXT: v_and_b32_e32 v30, 0xffff, v21
; GFX9-DS128-NEXT: v_and_b32_e32 v28, 0xffff, v20
-; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v23, 16, v27
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v21, 16, v26
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v35, 16, v25
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; GFX9-DS128-NEXT: v_and_b32_e32 v22, 0xffff, v27
+; GFX9-DS128-NEXT: ds_read_b128 v[36:39], v0 offset:64
; GFX9-DS128-NEXT: v_and_b32_e32 v20, 0xffff, v26
; GFX9-DS128-NEXT: v_and_b32_e32 v34, 0xffff, v25
; GFX9-DS128-NEXT: v_and_b32_e32 v32, 0xffff, v24
; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:80
; GFX9-DS128-NEXT: ds_read_b128 v[55:58], v0 offset:96
+; GFX9-DS128-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:32 ; 4-byte Folded Spill
+; GFX9-DS128-NEXT: s_nop 0
+; GFX9-DS128-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:36 ; 4-byte Folded Spill
+; GFX9-DS128-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:40 ; 4-byte Folded Spill
+; GFX9-DS128-NEXT: buffer_store_dword v4, off, s[12:15], 0 offset:44 ; 4-byte Folded Spill
; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2)
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v42, 16, v39
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v40, 16, v38
-; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v46, 16, v37
; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1)
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v50, 16, v27
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v48, 16, v26
@@ -4015,16 +4012,17 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-DS128-NEXT: v_and_b32_e32 v53, 0xffff, v25
; GFX9-DS128-NEXT: v_and_b32_e32 v51, 0xffff, v24
; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:112
+; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v46, 16, v37
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; GFX9-DS128-NEXT: v_and_b32_e32 v41, 0xffff, v39
; GFX9-DS128-NEXT: v_and_b32_e32 v39, 0xffff, v38
-; GFX9-DS128-NEXT: v_and_b32_e32 v45, 0xffff, v37
; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v25
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v1, 16, v24
; GFX9-DS128-NEXT: v_and_b32_e32 v2, 0xffff, v25
; GFX9-DS128-NEXT: v_and_b32_e32 v0, 0xffff, v24
; GFX9-DS128-NEXT: v_mov_b32_e32 v24, s0
+; GFX9-DS128-NEXT: v_and_b32_e32 v45, 0xffff, v37
; GFX9-DS128-NEXT: v_and_b32_e32 v43, 0xffff, v36
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v61, 16, v58
; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v59, 16, v57
@@ -4197,29 +4195,20 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-NO-DS128-LABEL: local_sextload_v64i16_to_v64i32:
; VI-NO-DS128: ; %bb.0:
; VI-NO-DS128-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; VI-NO-DS128-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
; VI-NO-DS128-NEXT: s_mov_b32 m0, -1
+; VI-NO-DS128-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
; VI-NO-DS128-NEXT: s_mov_b32 s89, SCRATCH_RSRC_DWORD1
; VI-NO-DS128-NEXT: s_mov_b32 s90, -1
; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(0)
; VI-NO-DS128-NEXT: v_mov_b32_e32 v28, s1
+; VI-NO-DS128-NEXT: ds_read2_b64 v[20:23], v28 offset0:4 offset1:5
+; VI-NO-DS128-NEXT: ds_read2_b64 v[29:32], v28 offset0:6 offset1:7
+; VI-NO-DS128-NEXT: ds_read2_b64 v[33:36], v28 offset0:8 offset1:9
; VI-NO-DS128-NEXT: ds_read2_b64 v[10:13], v28 offset1:1
; VI-NO-DS128-NEXT: ds_read2_b64 v[14:17], v28 offset0:2 offset1:3
; VI-NO-DS128-NEXT: s_mov_b32 s91, 0xe80000
; VI-NO-DS128-NEXT: s_add_u32 s88, s88, s11
-; VI-NO-DS128-NEXT: s_addc_u32 s89, s89, 0
-; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
-; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v11
-; VI-NO-DS128-NEXT: v_bfe_i32 v0, v11, 0, 16
-; VI-NO-DS128-NEXT: buffer_store_dword v0, off, s[88:91], 0 ; 4-byte Folded Spill
-; VI-NO-DS128-NEXT: buffer_store_dword v1, off, s[88:91], 0 offset:4 ; 4-byte Folded Spill
-; VI-NO-DS128-NEXT: ds_read2_b64 v[20:23], v28 offset0:4 offset1:5
-; VI-NO-DS128-NEXT: ds_read2_b64 v[29:32], v28 offset0:6 offset1:7
-; VI-NO-DS128-NEXT: ds_read2_b64 v[33:36], v28 offset0:8 offset1:9
-; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v10
-; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v5, 16, v13
-; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v7, 16, v12
-; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(3)
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v30
; VI-NO-DS128-NEXT: v_bfe_i32 v24, v30, 0, 16
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v27, 16, v29
@@ -4229,7 +4218,7 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v40, 16, v31
; VI-NO-DS128-NEXT: v_bfe_i32 v39, v31, 0, 16
; VI-NO-DS128-NEXT: ds_read2_b64 v[29:32], v28 offset0:10 offset1:11
-; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(3)
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v42, 16, v34
; VI-NO-DS128-NEXT: v_bfe_i32 v41, v34, 0, 16
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v44, 16, v33
@@ -4247,16 +4236,24 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v56, 16, v31
; VI-NO-DS128-NEXT: v_bfe_i32 v55, v31, 0, 16
; VI-NO-DS128-NEXT: ds_read2_b64 v[28:31], v28 offset0:14 offset1:15
+; VI-NO-DS128-NEXT: s_addc_u32 s89, s89, 0
+; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v11
+; VI-NO-DS128-NEXT: v_bfe_i32 v0, v11, 0, 16
+; VI-NO-DS128-NEXT: buffer_store_dword v0, off, s[88:91], 0 ; 4-byte Folded Spill
+; VI-NO-DS128-NEXT: buffer_store_dword v1, off, s[88:91], 0 offset:4 ; 4-byte Folded Spill
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v54, 16, v32
; VI-NO-DS128-NEXT: v_bfe_i32 v53, v32, 0, 16
-; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v15
-; VI-NO-DS128-NEXT: v_bfe_i32 v2, v10, 0, 16
; VI-NO-DS128-NEXT: s_waitcnt lgkmcnt(0)
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v32, 16, v31
; VI-NO-DS128-NEXT: v_bfe_i32 v31, v31, 0, 16
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v30
; VI-NO-DS128-NEXT: v_bfe_i32 v0, v30, 0, 16
; VI-NO-DS128-NEXT: v_mov_b32_e32 v30, s0
+; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v10
+; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v5, 16, v13
+; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v7, 16, v12
+; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v15
+; VI-NO-DS128-NEXT: v_bfe_i32 v2, v10, 0, 16
; VI-NO-DS128-NEXT: v_bfe_i32 v4, v13, 0, 16
; VI-NO-DS128-NEXT: v_bfe_i32 v6, v12, 0, 16
; VI-NO-DS128-NEXT: v_ashrrev_i32_e32 v11, 16, v14
@@ -4316,23 +4313,14 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-NO-DS128-NEXT: s_mov_b32 s15, 0xe00000
; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NO-DS128-NEXT: v_mov_b32_e32 v28, s1
+; GFX9-NO-DS128-NEXT: ds_read2_b64 v[20:23], v28 offset0:4 offset1:5
+; GFX9-NO-DS128-NEXT: ds_read2_b64 v[29:32], v28 offset0:6 offset1:7
+; GFX9-NO-DS128-NEXT: ds_read2_b64 v[33:36], v28 offset0:8 offset1:9
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[10:13], v28 offset1:1
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[14:17], v28 offset0:2 offset1:3
; GFX9-NO-DS128-NEXT: s_add_u32 s12, s12, s11
; GFX9-NO-DS128-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
-; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v11
-; GFX9-NO-DS128-NEXT: v_bfe_i32 v0, v11, 0, 16
-; GFX9-NO-DS128-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; 4-byte Folded Spill
-; GFX9-NO-DS128-NEXT: s_nop 0
-; GFX9-NO-DS128-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
-; GFX9-NO-DS128-NEXT: ds_read2_b64 v[20:23], v28 offset0:4 offset1:5
-; GFX9-NO-DS128-NEXT: ds_read2_b64 v[29:32], v28 offset0:6 offset1:7
-; GFX9-NO-DS128-NEXT: ds_read2_b64 v[33:36], v28 offset0:8 offset1:9
-; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v10
-; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v5, 16, v13
-; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v7, 16, v12
-; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(3)
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v30
; GFX9-NO-DS128-NEXT: v_bfe_i32 v24, v30, 0, 16
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v27, 16, v29
@@ -4342,7 +4330,7 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v40, 16, v31
; GFX9-NO-DS128-NEXT: v_bfe_i32 v39, v31, 0, 16
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[29:32], v28 offset0:10 offset1:11
-; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(3)
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v42, 16, v34
; GFX9-NO-DS128-NEXT: v_bfe_i32 v41, v34, 0, 16
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v44, 16, v33
@@ -4360,16 +4348,24 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v56, 16, v31
; GFX9-NO-DS128-NEXT: v_bfe_i32 v55, v31, 0, 16
; GFX9-NO-DS128-NEXT: ds_read2_b64 v[28:31], v28 offset0:14 offset1:15
+; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v11
+; GFX9-NO-DS128-NEXT: v_bfe_i32 v0, v11, 0, 16
+; GFX9-NO-DS128-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; 4-byte Folded Spill
+; GFX9-NO-DS128-NEXT: s_nop 0
+; GFX9-NO-DS128-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v54, 16, v32
; GFX9-NO-DS128-NEXT: v_bfe_i32 v53, v32, 0, 16
-; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v15
-; GFX9-NO-DS128-NEXT: v_bfe_i32 v2, v10, 0, 16
; GFX9-NO-DS128-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v32, 16, v31
; GFX9-NO-DS128-NEXT: v_bfe_i32 v31, v31, 0, 16
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v30
; GFX9-NO-DS128-NEXT: v_bfe_i32 v0, v30, 0, 16
; GFX9-NO-DS128-NEXT: v_mov_b32_e32 v30, s0
+; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v10
+; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v5, 16, v13
+; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v7, 16, v12
+; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v15
+; GFX9-NO-DS128-NEXT: v_bfe_i32 v2, v10, 0, 16
; GFX9-NO-DS128-NEXT: v_bfe_i32 v4, v13, 0, 16
; GFX9-NO-DS128-NEXT: v_bfe_i32 v6, v12, 0, 16
; GFX9-NO-DS128-NEXT: v_ashrrev_i32_e32 v11, 16, v14
@@ -4857,10 +4853,12 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-DS128-NEXT: v_mov_b32_e32 v32, s1
; VI-DS128-NEXT: ds_read_b128 v[8:11], v32
; VI-DS128-NEXT: ds_read_b128 v[16:19], v32 offset:16
+; VI-DS128-NEXT: ds_read_b128 v[24:27], v32 offset:32
+; VI-DS128-NEXT: ds_read_b128 v[33:36], v32 offset:48
; VI-DS128-NEXT: s_mov_b32 s91, 0xe80000
; VI-DS128-NEXT: s_add_u32 s88, s88, s11
; VI-DS128-NEXT: s_addc_u32 s89, s89, 0
-; VI-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; VI-DS128-NEXT: s_waitcnt lgkmcnt(3)
; VI-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v11
; VI-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v10
; VI-DS128-NEXT: v_bfe_i32 v2, v11, 0, 16
@@ -4873,12 +4871,6 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-DS128-NEXT: v_ashrrev_i32_e32 v4, 16, v8
; VI-DS128-NEXT: v_bfe_i32 v5, v9, 0, 16
; VI-DS128-NEXT: v_bfe_i32 v3, v8, 0, 16
-; VI-DS128-NEXT: buffer_store_dword v3, off, s[88:91], 0 offset:16 ; 4-byte Folded Spill
-; VI-DS128-NEXT: buffer_store_dword v4, off, s[88:91], 0 offset:20 ; 4-byte Folded Spill
-; VI-DS128-NEXT: buffer_store_dword v5, off, s[88:91], 0 offset:24 ; 4-byte Folded Spill
-; VI-DS128-NEXT: buffer_store_dword v6, off, s[88:91], 0 offset:28 ; 4-byte Folded Spill
-; VI-DS128-NEXT: ds_read_b128 v[24:27], v32 offset:32
-; VI-DS128-NEXT: ds_read_b128 v[33:36], v32 offset:48
; VI-DS128-NEXT: s_waitcnt lgkmcnt(2)
; VI-DS128-NEXT: v_ashrrev_i32_e32 v11, 16, v19
; VI-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v18
@@ -4899,8 +4891,11 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-DS128-NEXT: ds_read_b128 v[36:39], v32 offset:64
; VI-DS128-NEXT: ds_read_b128 v[40:43], v32 offset:80
; VI-DS128-NEXT: ds_read_b128 v[56:59], v32 offset:96
+; VI-DS128-NEXT: buffer_store_dword v3, off, s[88:91], 0 offset:16 ; 4-byte Folded Spill
+; VI-DS128-NEXT: buffer_store_dword v4, off, s[88:91], 0 offset:20 ; 4-byte Folded Spill
+; VI-DS128-NEXT: buffer_store_dword v5, off, s[88:91], 0 offset:24 ; 4-byte Folded Spill
+; VI-DS128-NEXT: buffer_store_dword v6, off, s[88:91], 0 offset:28 ; 4-byte Folded Spill
; VI-DS128-NEXT: v_ashrrev_i32_e32 v23, 16, v25
-; VI-DS128-NEXT: v_ashrrev_i32_e32 v21, 16, v24
; VI-DS128-NEXT: s_waitcnt lgkmcnt(2)
; VI-DS128-NEXT: v_ashrrev_i32_e32 v47, 16, v39
; VI-DS128-NEXT: v_ashrrev_i32_e32 v45, 16, v38
@@ -4913,14 +4908,15 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; VI-DS128-NEXT: v_bfe_i32 v52, v40, 0, 16
; VI-DS128-NEXT: ds_read_b128 v[37:40], v32 offset:112
; VI-DS128-NEXT: v_mov_b32_e32 v32, s0
+; VI-DS128-NEXT: v_ashrrev_i32_e32 v21, 16, v24
; VI-DS128-NEXT: v_bfe_i32 v22, v25, 0, 16
; VI-DS128-NEXT: v_bfe_i32 v20, v24, 0, 16
-; VI-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v35
; VI-DS128-NEXT: s_waitcnt lgkmcnt(0)
; VI-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v38
; VI-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v37
; VI-DS128-NEXT: v_bfe_i32 v2, v38, 0, 16
; VI-DS128-NEXT: v_bfe_i32 v0, v37, 0, 16
+; VI-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v35
; VI-DS128-NEXT: v_ashrrev_i32_e32 v31, 16, v34
; VI-DS128-NEXT: v_ashrrev_i32_e32 v29, 16, v33
; VI-DS128-NEXT: v_bfe_i32 v24, v35, 0, 16
@@ -4985,9 +4981,11 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-DS128-NEXT: v_mov_b32_e32 v32, s1
; GFX9-DS128-NEXT: ds_read_b128 v[8:11], v32
; GFX9-DS128-NEXT: ds_read_b128 v[16:19], v32 offset:16
+; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v32 offset:32
+; GFX9-DS128-NEXT: ds_read_b128 v[33:36], v32 offset:48
; GFX9-DS128-NEXT: s_add_u32 s12, s12, s11
; GFX9-DS128-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1)
+; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(3)
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v11
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v10
; GFX9-DS128-NEXT: v_bfe_i32 v2, v11, 0, 16
@@ -5001,13 +4999,6 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v4, 16, v8
; GFX9-DS128-NEXT: v_bfe_i32 v5, v9, 0, 16
; GFX9-DS128-NEXT: v_bfe_i32 v3, v8, 0, 16
-; GFX9-DS128-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:16 ; 4-byte Folded Spill
-; GFX9-DS128-NEXT: s_nop 0
-; GFX9-DS128-NEXT: buffer_store_dword v4, off, s[12:15], 0 offset:20 ; 4-byte Folded Spill
-; GFX9-DS128-NEXT: buffer_store_dword v5, off, s[12:15], 0 offset:24 ; 4-byte Folded Spill
-; GFX9-DS128-NEXT: buffer_store_dword v6, off, s[12:15], 0 offset:28 ; 4-byte Folded Spill
-; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v32 offset:32
-; GFX9-DS128-NEXT: ds_read_b128 v[33:36], v32 offset:48
; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2)
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v11, 16, v19
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v18
@@ -5028,8 +5019,12 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-DS128-NEXT: ds_read_b128 v[36:39], v32 offset:64
; GFX9-DS128-NEXT: ds_read_b128 v[40:43], v32 offset:80
; GFX9-DS128-NEXT: ds_read_b128 v[56:59], v32 offset:96
+; GFX9-DS128-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:16 ; 4-byte Folded Spill
+; GFX9-DS128-NEXT: s_nop 0
+; GFX9-DS128-NEXT: buffer_store_dword v4, off, s[12:15], 0 offset:20 ; 4-byte Folded Spill
+; GFX9-DS128-NEXT: buffer_store_dword v5, off, s[12:15], 0 offset:24 ; 4-byte Folded Spill
+; GFX9-DS128-NEXT: buffer_store_dword v6, off, s[12:15], 0 offset:28 ; 4-byte Folded Spill
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v23, 16, v25
-; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v21, 16, v24
; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2)
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v47, 16, v39
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v45, 16, v38
@@ -5042,14 +5037,15 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out
; GFX9-DS128-NEXT: v_bfe_i32 v52, v40, 0, 16
; GFX9-DS128-NEXT: ds_read_b128 v[37:40], v32 offset:112
; GFX9-DS128-NEXT: v_mov_b32_e32 v32, s0
+; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v21, 16, v24
; GFX9-DS128-NEXT: v_bfe_i32 v22, v25, 0, 16
; GFX9-DS128-NEXT: v_bfe_i32 v20, v24, 0, 16
-; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v35
; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v38
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v37
; GFX9-DS128-NEXT: v_bfe_i32 v2, v38, 0, 16
; GFX9-DS128-NEXT: v_bfe_i32 v0, v37, 0, 16
+; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v35
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v31, 16, v34
; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v29, 16, v33
; GFX9-DS128-NEXT: v_bfe_i32 v24, v35, 0, 16
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-lastuse-metadata.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-lastuse-metadata.ll
index 1d1d3e4..9da7a79 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-lastuse-metadata.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-lastuse-metadata.ll
@@ -15,24 +15,23 @@ define amdgpu_kernel void @buffer_last_use_load_0(ptr addrspace(7) %in, ptr addr
; GFX12-NEXT: s_mov_b32 s9, s12
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_mov_b32 s6, s3
-; GFX12-NEXT: v_mov_b32_e32 v0, s0
; GFX12-NEXT: s_mov_b32 s8, s1
; GFX12-NEXT: s_or_b64 s[10:11], s[6:7], s[12:13]
; GFX12-NEXT: s_mov_b32 s13, s2
-; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
; GFX12-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GFX12-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_LU
; GFX12-NEXT: s_clause 0x1
; GFX12-NEXT: s_load_b32 s13, s[4:5], 0x30
; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX12-NEXT: s_mov_b32 s5, s12
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s4, s3
; GFX12-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_LU
+; GFX12-NEXT: s_mov_b32 s4, s3
+; GFX12-NEXT: s_mov_b32 s3, s12
; GFX12-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
; GFX12-NEXT: s_mov_b32 s13, s2
; GFX12-NEXT: s_mov_b32 s2, s1
-; GFX12-NEXT: s_mov_b32 s3, s12
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -63,10 +62,10 @@ define amdgpu_kernel void @buffer_last_use_load_1(ptr addrspace(7) %in, ptr addr
; GFX12-NEXT: s_mov_b32 s13, s2
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GFX12-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_LU
; GFX12-NEXT: s_clause 0x1
; GFX12-NEXT: s_load_b32 s13, s[4:5], 0x30
; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
+; GFX12-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_LU
; GFX12-NEXT: s_mov_b32 s5, s12
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_mov_b32 s4, s3
@@ -100,25 +99,24 @@ define amdgpu_kernel void @buffer_last_use_and_volatile_load(ptr addrspace(7) %i
; GFX12-NEXT: s_mov_b32 s9, s12
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_mov_b32 s6, s3
-; GFX12-NEXT: v_mov_b32_e32 v0, s0
; GFX12-NEXT: s_mov_b32 s8, s1
; GFX12-NEXT: s_or_b64 s[10:11], s[6:7], s[12:13]
; GFX12-NEXT: s_mov_b32 s13, s2
-; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
; GFX12-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GFX12-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_BYPASS scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_clause 0x1
; GFX12-NEXT: s_load_b32 s13, s[4:5], 0x30
; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX12-NEXT: s_mov_b32 s5, s12
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s4, s3
; GFX12-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, s3
+; GFX12-NEXT: s_mov_b32 s3, s12
; GFX12-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
; GFX12-NEXT: s_mov_b32 s13, s2
; GFX12-NEXT: s_mov_b32 s2, s1
-; GFX12-NEXT: s_mov_b32 s3, s12
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
; GFX12-NEXT: buffer_store_b32 v0, v1, s[4:7], null offen
@@ -141,24 +139,23 @@ define amdgpu_kernel void @buffer_last_use_and_nontemporal_load(ptr addrspace(7)
; GFX12-NEXT: s_mov_b32 s9, s12
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_mov_b32 s6, s3
-; GFX12-NEXT: v_mov_b32_e32 v0, s0
; GFX12-NEXT: s_mov_b32 s8, s1
; GFX12-NEXT: s_or_b64 s[10:11], s[6:7], s[12:13]
; GFX12-NEXT: s_mov_b32 s13, s2
-; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
; GFX12-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GFX12-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_LU
; GFX12-NEXT: s_clause 0x1
; GFX12-NEXT: s_load_b32 s13, s[4:5], 0x30
; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX12-NEXT: s_mov_b32 s5, s12
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s4, s3
; GFX12-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_LU
+; GFX12-NEXT: s_mov_b32 s4, s3
+; GFX12-NEXT: s_mov_b32 s3, s12
; GFX12-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
; GFX12-NEXT: s_mov_b32 s13, s2
; GFX12-NEXT: s_mov_b32 s2, s1
-; GFX12-NEXT: s_mov_b32 s3, s12
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
; GFX12-NEXT: s_wait_loadcnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll
index fc36ed9..84db54c 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll
@@ -128,10 +128,10 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX10-SDAG-NEXT: s_or_b64 s[6:7], s[4:5], s[10:11]
; GFX10-SDAG-NEXT: s_mov_b32 s11, s2
; GFX10-SDAG-NEXT: s_or_b64 s[4:5], s[12:13], s[10:11]
-; GFX10-SDAG-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen slc
; GFX10-SDAG-NEXT: s_clause 0x1
; GFX10-SDAG-NEXT: s_load_dword s11, s[8:9], 0x30
; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x20
+; GFX10-SDAG-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen slc
; GFX10-SDAG-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-SDAG-NEXT: s_mov_b32 s5, s10
; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
@@ -181,24 +181,23 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX11-SDAG-NEXT: s_mov_b32 s9, s12
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-NEXT: s_mov_b32 s6, s3
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX11-SDAG-NEXT: s_mov_b32 s8, s1
; GFX11-SDAG-NEXT: s_or_b64 s[10:11], s[6:7], s[12:13]
; GFX11-SDAG-NEXT: s_mov_b32 s13, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX11-SDAG-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GFX11-SDAG-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen slc dlc
; GFX11-SDAG-NEXT: s_clause 0x1
; GFX11-SDAG-NEXT: s_load_b32 s13, s[4:5], 0x30
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX11-SDAG-NEXT: s_mov_b32 s5, s12
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_mov_b32 s4, s3
; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen slc dlc
+; GFX11-SDAG-NEXT: s_mov_b32 s4, s3
+; GFX11-SDAG-NEXT: s_mov_b32 s3, s12
; GFX11-SDAG-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
; GFX11-SDAG-NEXT: s_mov_b32 s13, s2
; GFX11-SDAG-NEXT: s_mov_b32 s2, s1
-; GFX11-SDAG-NEXT: s_mov_b32 s3, s12
; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
@@ -215,12 +214,12 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX11-GISEL-NEXT: s_mov_b32 s8, s1
; GFX11-GISEL-NEXT: s_mov_b32 s9, s2
; GFX11-GISEL-NEXT: s_mov_b32 s10, s3
-; GFX11-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen slc dlc
; GFX11-GISEL-NEXT: s_clause 0x1
; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX11-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x30
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen slc dlc
; GFX11-GISEL-NEXT: s_mov_b32 s4, s1
; GFX11-GISEL-NEXT: s_mov_b32 s5, s2
; GFX11-GISEL-NEXT: s_mov_b32 s6, s3
@@ -239,24 +238,23 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX12-SDAG-NEXT: s_mov_b32 s9, s12
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-NEXT: s_mov_b32 s6, s3
-; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX12-SDAG-NEXT: s_mov_b32 s8, s1
; GFX12-SDAG-NEXT: s_or_b64 s[10:11], s[6:7], s[12:13]
; GFX12-SDAG-NEXT: s_mov_b32 s13, s2
-; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX12-SDAG-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GFX12-SDAG-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT
; GFX12-SDAG-NEXT: s_clause 0x1
; GFX12-SDAG-NEXT: s_load_b32 s13, s[4:5], 0x30
; GFX12-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX12-SDAG-NEXT: s_mov_b32 s5, s12
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: s_mov_b32 s4, s3
; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT
+; GFX12-SDAG-NEXT: s_mov_b32 s4, s3
+; GFX12-SDAG-NEXT: s_mov_b32 s3, s12
; GFX12-SDAG-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
; GFX12-SDAG-NEXT: s_mov_b32 s13, s2
; GFX12-SDAG-NEXT: s_mov_b32 s2, s1
-; GFX12-SDAG-NEXT: s_mov_b32 s3, s12
; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
@@ -273,12 +271,12 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX12-GISEL-NEXT: s_mov_b32 s8, s1
; GFX12-GISEL-NEXT: s_mov_b32 s9, s2
; GFX12-GISEL-NEXT: s_mov_b32 s10, s3
-; GFX12-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT
; GFX12-GISEL-NEXT: s_clause 0x1
; GFX12-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX12-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x30
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT
; GFX12-GISEL-NEXT: s_mov_b32 s4, s1
; GFX12-GISEL-NEXT: s_mov_b32 s5, s2
; GFX12-GISEL-NEXT: s_mov_b32 s6, s3
@@ -413,11 +411,11 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX10-SDAG-NEXT: s_or_b64 s[6:7], s[4:5], s[10:11]
; GFX10-SDAG-NEXT: s_mov_b32 s11, s2
; GFX10-SDAG-NEXT: s_or_b64 s[4:5], s[12:13], s[10:11]
-; GFX10-SDAG-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen glc dlc
-; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX10-SDAG-NEXT: s_clause 0x1
; GFX10-SDAG-NEXT: s_load_dword s11, s[8:9], 0x30
; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x20
+; GFX10-SDAG-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen glc dlc
+; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX10-SDAG-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-SDAG-NEXT: s_mov_b32 s5, s10
; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
@@ -468,25 +466,24 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX11-SDAG-NEXT: s_mov_b32 s9, s12
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-NEXT: s_mov_b32 s6, s3
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX11-SDAG-NEXT: s_mov_b32 s8, s1
; GFX11-SDAG-NEXT: s_or_b64 s[10:11], s[6:7], s[12:13]
; GFX11-SDAG-NEXT: s_mov_b32 s13, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX11-SDAG-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GFX11-SDAG-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen glc dlc
-; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
; GFX11-SDAG-NEXT: s_clause 0x1
; GFX11-SDAG-NEXT: s_load_b32 s13, s[4:5], 0x30
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX11-SDAG-NEXT: s_mov_b32 s5, s12
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_mov_b32 s4, s3
; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-SDAG-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen glc dlc
+; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-NEXT: s_mov_b32 s4, s3
+; GFX11-SDAG-NEXT: s_mov_b32 s3, s12
; GFX11-SDAG-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
; GFX11-SDAG-NEXT: s_mov_b32 s13, s2
; GFX11-SDAG-NEXT: s_mov_b32 s2, s1
-; GFX11-SDAG-NEXT: s_mov_b32 s3, s12
; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-SDAG-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
; GFX11-SDAG-NEXT: buffer_store_b32 v0, v1, s[4:7], 0 offen dlc
@@ -503,13 +500,13 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX11-GISEL-NEXT: s_mov_b32 s8, s1
; GFX11-GISEL-NEXT: s_mov_b32 s9, s2
; GFX11-GISEL-NEXT: s_mov_b32 s10, s3
-; GFX11-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen glc dlc
-; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX11-GISEL-NEXT: s_clause 0x1
; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX11-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x30
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s0
+; GFX11-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen glc dlc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX11-GISEL-NEXT: s_mov_b32 s4, s1
; GFX11-GISEL-NEXT: s_mov_b32 s5, s2
; GFX11-GISEL-NEXT: s_mov_b32 s6, s3
@@ -528,25 +525,24 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX12-SDAG-NEXT: s_mov_b32 s9, s12
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-NEXT: s_mov_b32 s6, s3
-; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX12-SDAG-NEXT: s_mov_b32 s8, s1
; GFX12-SDAG-NEXT: s_or_b64 s[10:11], s[6:7], s[12:13]
; GFX12-SDAG-NEXT: s_mov_b32 s13, s2
-; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX12-SDAG-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GFX12-SDAG-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT scope:SCOPE_SYS
-; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX12-SDAG-NEXT: s_clause 0x1
; GFX12-SDAG-NEXT: s_load_b32 s13, s[4:5], 0x30
; GFX12-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX12-SDAG-NEXT: s_mov_b32 s5, s12
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-NEXT: s_mov_b32 s4, s3
; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-SDAG-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT scope:SCOPE_SYS
+; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-NEXT: s_mov_b32 s4, s3
+; GFX12-SDAG-NEXT: s_mov_b32 s3, s12
; GFX12-SDAG-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
; GFX12-SDAG-NEXT: s_mov_b32 s13, s2
; GFX12-SDAG-NEXT: s_mov_b32 s2, s1
-; GFX12-SDAG-NEXT: s_mov_b32 s3, s12
; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-SDAG-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
; GFX12-SDAG-NEXT: buffer_store_b32 v0, v1, s[4:7], null offen th:TH_STORE_NT scope:SCOPE_SYS
@@ -563,13 +559,13 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX12-GISEL-NEXT: s_mov_b32 s8, s1
; GFX12-GISEL-NEXT: s_mov_b32 s9, s2
; GFX12-GISEL-NEXT: s_mov_b32 s10, s3
-; GFX12-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT scope:SCOPE_SYS
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX12-GISEL-NEXT: s_clause 0x1
; GFX12-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX12-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x30
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, s0
+; GFX12-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX12-GISEL-NEXT: s_mov_b32 s4, s1
; GFX12-GISEL-NEXT: s_mov_b32 s5, s2
; GFX12-GISEL-NEXT: s_mov_b32 s6, s3
diff --git a/llvm/test/CodeGen/AMDGPU/max.ll b/llvm/test/CodeGen/AMDGPU/max.ll
index ae08054..ba53294 100644
--- a/llvm/test/CodeGen/AMDGPU/max.ll
+++ b/llvm/test/CodeGen/AMDGPU/max.ll
@@ -774,9 +774,9 @@ define amdgpu_kernel void @v_test_umax_ugt_i32(ptr addrspace(1) %out, ptr addrsp
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
; GFX1250-NEXT: s_load_b32 s6, s[0:1], 0x0
; GFX1250-NEXT: s_load_b64 s[2:3], s[4:5], 0x24
+; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_max_u32_e32 v0, s6, v0
diff --git a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
index 02f39e2..888a458 100644
--- a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
+++ b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
@@ -714,7 +714,7 @@ define void @memcpy_p0_p0_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(0)
; ALIGNED-NEXT: flat_store_byte v[16:17], v4 offset:1
; ALIGNED-NEXT: s_cbranch_vccnz .LBB0_1
; ALIGNED-NEXT: ; %bb.2: ; %memcpy-split
-; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v47, off, s[0:3], s32
; ALIGNED-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4
; ALIGNED-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8
@@ -1468,7 +1468,7 @@ define void @memcpy_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1)
; ALIGNED-NEXT: global_store_byte v[16:17], v4, off offset:1
; ALIGNED-NEXT: s_cbranch_vccnz .LBB1_1
; ALIGNED-NEXT: ; %bb.2: ; %memcpy-split
-; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v47, off, s[0:3], s32
; ALIGNED-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4
; ALIGNED-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8
@@ -1854,6 +1854,10 @@ define void @memcpy_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4)
; ALIGNED-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:220
; ALIGNED-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:212
; ALIGNED-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:208
+; ALIGNED-NEXT: flat_store_byte v[96:97], v82 offset:143
+; ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; ALIGNED-NEXT: v_lshrrev_b32_e32 v82, 24, v18
+; ALIGNED-NEXT: v_lshrrev_b32_e32 v51, 8, v26
; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v38 offset:138
; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v39 offset:142
; ALIGNED-NEXT: flat_store_byte v[96:97], v39 offset:140
@@ -1862,10 +1866,6 @@ define void @memcpy_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4)
; ALIGNED-NEXT: flat_store_byte v[96:97], v37 offset:132
; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v36 offset:130
; ALIGNED-NEXT: flat_store_byte v[96:97], v36 offset:128
-; ALIGNED-NEXT: flat_store_byte v[96:97], v82 offset:143
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: v_lshrrev_b32_e32 v82, 24, v18
-; ALIGNED-NEXT: v_lshrrev_b32_e32 v51, 8, v26
; ALIGNED-NEXT: flat_store_byte v[96:97], v66 offset:139
; ALIGNED-NEXT: flat_store_byte v[96:97], v67 offset:137
; ALIGNED-NEXT: flat_store_byte v[96:97], v83 offset:141
@@ -1901,14 +1901,6 @@ define void @memcpy_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4)
; ALIGNED-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:60
; ALIGNED-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:52
; ALIGNED-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:48
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v30 offset:106
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v31 offset:110
-; ALIGNED-NEXT: flat_store_byte v[96:97], v31 offset:108
-; ALIGNED-NEXT: flat_store_byte v[96:97], v30 offset:104
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v29 offset:102
-; ALIGNED-NEXT: flat_store_byte v[96:97], v29 offset:100
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v28 offset:98
-; ALIGNED-NEXT: flat_store_byte v[96:97], v28 offset:96
; ALIGNED-NEXT: flat_store_byte v[96:97], v69 offset:111
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: v_lshrrev_b32_e32 v69, 24, v6
@@ -1923,6 +1915,14 @@ define void @memcpy_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4)
; ALIGNED-NEXT: v_lshrrev_b32_e32 v101, 24, v25
; ALIGNED-NEXT: v_lshrrev_b32_e32 v99, 24, v19
; ALIGNED-NEXT: v_lshrrev_b32_e32 v86, 24, v15
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v30 offset:106
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v31 offset:110
+; ALIGNED-NEXT: flat_store_byte v[96:97], v31 offset:108
+; ALIGNED-NEXT: flat_store_byte v[96:97], v30 offset:104
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v29 offset:102
+; ALIGNED-NEXT: flat_store_byte v[96:97], v29 offset:100
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v28 offset:98
+; ALIGNED-NEXT: flat_store_byte v[96:97], v28 offset:96
; ALIGNED-NEXT: v_lshrrev_b32_e32 v28, 24, v11
; ALIGNED-NEXT: flat_store_byte v[96:97], v71 offset:103
; ALIGNED-NEXT: v_lshrrev_b32_e32 v71, 24, v7
@@ -3438,7 +3438,7 @@ define void @memcpy_p5_p5_sz2048(ptr addrspace(5) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: v_add_nc_u32_e32 v0, 0x100, v0
; ALIGNED-NEXT: s_cbranch_vccnz .LBB3_1
; ALIGNED-NEXT: ; %bb.2: ; %memcpy-split
-; ALIGNED-NEXT: s_clause 0x2f
+; ALIGNED-NEXT: s_clause 0x2f ; 192-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v127, off, s[0:3], s32
; ALIGNED-NEXT: buffer_load_dword v126, off, s[0:3], s32 offset:4
; ALIGNED-NEXT: buffer_load_dword v125, off, s[0:3], s32 offset:8
@@ -3741,23 +3741,23 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1228 ; 4-byte Folded Spill
; ALIGNED-NEXT: .LBB4_1: ; %load-store-loop
; ALIGNED-NEXT: ; =>This Inner Loop Header: Depth=1
-; ALIGNED-NEXT: s_clause 0x39
+; ALIGNED-NEXT: s_clause 0x3e
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:20
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:21
; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:22
; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:23
-; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:24
-; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:25
-; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:26
; ALIGNED-NEXT: buffer_load_ubyte v126, v2, s[0:3], 0 offen offset:19
; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:28
; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:29
; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:30
; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:31
+; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:24
+; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:25
+; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:26
+; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:27
; ALIGNED-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:32
; ALIGNED-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:33
; ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:34
-; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:27
; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:35
; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:36
; ALIGNED-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:37
@@ -3779,17 +3779,17 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:53
; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:54
; ALIGNED-NEXT: buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:55
-; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:56
-; ALIGNED-NEXT: buffer_load_ubyte v51, v2, s[0:3], 0 offen offset:57
-; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:58
; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:60
; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:61
; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:62
; ALIGNED-NEXT: buffer_load_ubyte v49, v2, s[0:3], 0 offen offset:63
+; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:56
+; ALIGNED-NEXT: buffer_load_ubyte v51, v2, s[0:3], 0 offen offset:57
+; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:58
+; ALIGNED-NEXT: buffer_load_ubyte v53, v2, s[0:3], 0 offen offset:59
; ALIGNED-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:64
; ALIGNED-NEXT: buffer_load_ubyte v55, v2, s[0:3], 0 offen offset:65
; ALIGNED-NEXT: buffer_load_ubyte v66, v2, s[0:3], 0 offen offset:66
-; ALIGNED-NEXT: buffer_load_ubyte v53, v2, s[0:3], 0 offen offset:59
; ALIGNED-NEXT: buffer_load_ubyte v67, v2, s[0:3], 0 offen offset:67
; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:68
; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:69
@@ -3797,57 +3797,96 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:71
; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:76
; ALIGNED-NEXT: buffer_load_ubyte v70, v2, s[0:3], 0 offen offset:77
-; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:75
; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:78
; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:79
-; ALIGNED-NEXT: s_waitcnt vmcnt(57)
+; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:75
+; ALIGNED-NEXT: buffer_load_ubyte v127, v2, s[0:3], 0 offen offset:159
+; ALIGNED-NEXT: buffer_load_ubyte v124, v2, s[0:3], 0 offen offset:155
+; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:152
+; ALIGNED-NEXT: buffer_load_ubyte v123, v2, s[0:3], 0 offen offset:153
+; ALIGNED-NEXT: buffer_load_ubyte v111, v2, s[0:3], 0 offen offset:154
+; ALIGNED-NEXT: s_clause 0x33
+; ALIGNED-NEXT: buffer_load_ubyte v110, v2, s[0:3], 0 offen offset:160
+; ALIGNED-NEXT: buffer_load_ubyte v105, v2, s[0:3], 0 offen offset:161
+; ALIGNED-NEXT: buffer_load_ubyte v94, v2, s[0:3], 0 offen offset:162
+; ALIGNED-NEXT: buffer_load_ubyte v93, v2, s[0:3], 0 offen offset:163
+; ALIGNED-NEXT: buffer_load_ubyte v108, v2, s[0:3], 0 offen offset:164
+; ALIGNED-NEXT: buffer_load_ubyte v95, v2, s[0:3], 0 offen offset:165
+; ALIGNED-NEXT: buffer_load_ubyte v104, v2, s[0:3], 0 offen offset:166
+; ALIGNED-NEXT: buffer_load_ubyte v92, v2, s[0:3], 0 offen offset:167
+; ALIGNED-NEXT: buffer_load_ubyte v116, v2, s[0:3], 0 offen offset:192
+; ALIGNED-NEXT: buffer_load_ubyte v113, v2, s[0:3], 0 offen offset:193
+; ALIGNED-NEXT: buffer_load_ubyte v101, v2, s[0:3], 0 offen offset:194
+; ALIGNED-NEXT: buffer_load_ubyte v102, v2, s[0:3], 0 offen offset:195
+; ALIGNED-NEXT: buffer_load_ubyte v114, v2, s[0:3], 0 offen offset:196
+; ALIGNED-NEXT: buffer_load_ubyte v103, v2, s[0:3], 0 offen offset:197
+; ALIGNED-NEXT: buffer_load_ubyte v112, v2, s[0:3], 0 offen offset:198
+; ALIGNED-NEXT: buffer_load_ubyte v100, v2, s[0:3], 0 offen offset:199
+; ALIGNED-NEXT: buffer_load_ubyte v98, v2, s[0:3], 0 offen offset:204
+; ALIGNED-NEXT: buffer_load_ubyte v97, v2, s[0:3], 0 offen offset:205
+; ALIGNED-NEXT: buffer_load_ubyte v96, v2, s[0:3], 0 offen offset:206
+; ALIGNED-NEXT: buffer_load_ubyte v87, v2, s[0:3], 0 offen offset:207
+; ALIGNED-NEXT: buffer_load_ubyte v85, v2, s[0:3], 0 offen offset:203
+; ALIGNED-NEXT: buffer_load_ubyte v86, v2, s[0:3], 0 offen offset:200
+; ALIGNED-NEXT: buffer_load_ubyte v84, v2, s[0:3], 0 offen offset:201
+; ALIGNED-NEXT: buffer_load_ubyte v83, v2, s[0:3], 0 offen offset:202
+; ALIGNED-NEXT: buffer_load_ubyte v90, v2, s[0:3], 0 offen offset:172
+; ALIGNED-NEXT: buffer_load_ubyte v79, v2, s[0:3], 0 offen offset:173
+; ALIGNED-NEXT: buffer_load_ubyte v88, v2, s[0:3], 0 offen offset:174
+; ALIGNED-NEXT: buffer_load_ubyte v78, v2, s[0:3], 0 offen offset:175
+; ALIGNED-NEXT: buffer_load_ubyte v75, v2, s[0:3], 0 offen offset:171
+; ALIGNED-NEXT: buffer_load_ubyte v76, v2, s[0:3], 0 offen offset:168
+; ALIGNED-NEXT: buffer_load_ubyte v72, v2, s[0:3], 0 offen offset:169
+; ALIGNED-NEXT: buffer_load_ubyte v63, v2, s[0:3], 0 offen offset:170
+; ALIGNED-NEXT: buffer_load_ubyte v62, v2, s[0:3], 0 offen offset:176
+; ALIGNED-NEXT: buffer_load_ubyte v60, v2, s[0:3], 0 offen offset:177
+; ALIGNED-NEXT: buffer_load_ubyte v57, v2, s[0:3], 0 offen offset:178
+; ALIGNED-NEXT: buffer_load_ubyte v56, v2, s[0:3], 0 offen offset:179
+; ALIGNED-NEXT: buffer_load_ubyte v61, v2, s[0:3], 0 offen offset:180
+; ALIGNED-NEXT: buffer_load_ubyte v59, v2, s[0:3], 0 offen offset:181
+; ALIGNED-NEXT: buffer_load_ubyte v58, v2, s[0:3], 0 offen offset:182
+; ALIGNED-NEXT: buffer_load_ubyte v47, v2, s[0:3], 0 offen offset:183
+; ALIGNED-NEXT: buffer_load_ubyte v45, v2, s[0:3], 0 offen offset:188
+; ALIGNED-NEXT: buffer_load_ubyte v43, v2, s[0:3], 0 offen offset:189
+; ALIGNED-NEXT: buffer_load_ubyte v44, v2, s[0:3], 0 offen offset:190
+; ALIGNED-NEXT: buffer_load_ubyte v42, v2, s[0:3], 0 offen offset:191
+; ALIGNED-NEXT: buffer_load_ubyte v40, v2, s[0:3], 0 offen offset:187
+; ALIGNED-NEXT: buffer_load_ubyte v41, v2, s[0:3], 0 offen offset:184
+; ALIGNED-NEXT: buffer_load_ubyte v119, v2, s[0:3], 0 offen offset:185
+; ALIGNED-NEXT: buffer_load_ubyte v118, v2, s[0:3], 0 offen offset:186
+; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:2
+; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:4
+; ALIGNED-NEXT: buffer_load_ubyte v109, v2, s[0:3], 0 offen offset:5
+; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:6
+; ALIGNED-NEXT: s_waitcnt vmcnt(62)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(56)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(55)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(54)
; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(53)
-; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(52)
-; ALIGNED-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(51)
-; ALIGNED-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(49)
; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(48)
; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(47)
; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(46)
; ALIGNED-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(45)
+; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v7, 8, v5
-; ALIGNED-NEXT: s_waitcnt vmcnt(42)
-; ALIGNED-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v4, v9, 8, v8
-; ALIGNED-NEXT: s_waitcnt vmcnt(40)
; ALIGNED-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v5, v10, 8, v6
; ALIGNED-NEXT: v_lshl_or_b32 v6, v11, 8, v12
; ALIGNED-NEXT: v_lshl_or_b32 v7, v15, 8, v14
; ALIGNED-NEXT: v_lshl_or_b32 v8, v19, 8, v17
-; ALIGNED-NEXT: s_waitcnt vmcnt(39)
; ALIGNED-NEXT: v_lshl_or_b32 v9, v16, 8, v13
-; ALIGNED-NEXT: s_waitcnt vmcnt(37)
; ALIGNED-NEXT: v_lshl_or_b32 v10, v20, 8, v18
-; ALIGNED-NEXT: s_waitcnt vmcnt(35)
; ALIGNED-NEXT: v_lshl_or_b32 v11, v23, 8, v22
-; ALIGNED-NEXT: s_waitcnt vmcnt(33)
; ALIGNED-NEXT: v_lshl_or_b32 v12, v28, 8, v25
-; ALIGNED-NEXT: s_waitcnt vmcnt(31)
; ALIGNED-NEXT: v_lshl_or_b32 v13, v24, 8, v21
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(29)
; ALIGNED-NEXT: v_lshl_or_b32 v14, v27, 8, v26
; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v6, 16, v5
@@ -3856,76 +3895,83 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: v_lshl_or_b32 v6, v12, 16, v11
; ALIGNED-NEXT: v_lshl_or_b32 v7, v14, 16, v13
; ALIGNED-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(27)
; ALIGNED-NEXT: v_lshl_or_b32 v15, v31, 8, v30
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(25)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v34, 8, v33
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(23)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v37, 8, v32
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(21)
; ALIGNED-NEXT: v_lshl_or_b32 v3, v36, 8, v35
; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(16)
; ALIGNED-NEXT: v_lshl_or_b32 v4, v50, 8, v38
; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(14)
; ALIGNED-NEXT: v_lshl_or_b32 v5, v49, 8, v39
; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v6, v51, 8, v48
; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(10)
; ALIGNED-NEXT: v_lshl_or_b32 v7, v53, 8, v52
; ALIGNED-NEXT: v_lshl_or_b32 v0, v0, 16, v15
; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 16, v1
; ALIGNED-NEXT: v_lshl_or_b32 v3, v5, 16, v4
+; ALIGNED-NEXT: s_clause 0x1
; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:85
+; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:87
; ALIGNED-NEXT: v_lshl_or_b32 v4, v7, 16, v6
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v55, 8, v29
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(10)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v67, 8, v66
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(8)
; ALIGNED-NEXT: v_lshl_or_b32 v3, v64, 8, v54
; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
; ALIGNED-NEXT: v_lshl_or_b32 v4, v68, 8, v65
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:86
; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:82
-; ALIGNED-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:86
+; ALIGNED-NEXT: buffer_store_dword v65, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 16, v3
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
+; ALIGNED-NEXT: s_waitcnt vmcnt(62)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v70, 8, v69
; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:83
; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:74
+; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:83
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(5)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v80, 8, v71
-; ALIGNED-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v68, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v69, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v71, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(62)
+; ALIGNED-NEXT: buffer_store_dword v81, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:73
-; ALIGNED-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:72
-; ALIGNED-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v67, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v64, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v70, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v80, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:216
+; ALIGNED-NEXT: buffer_load_ubyte v67, v2, s[0:3], 0 offen offset:217
+; ALIGNED-NEXT: buffer_load_ubyte v53, v2, s[0:3], 0 offen offset:218
+; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:219
+; ALIGNED-NEXT: buffer_load_ubyte v70, v2, s[0:3], 0 offen offset:220
+; ALIGNED-NEXT: buffer_load_ubyte v55, v2, s[0:3], 0 offen offset:221
+; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:222
+; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:223
+; ALIGNED-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x2
+; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:208
+; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:209
+; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:210
; ALIGNED-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
@@ -3934,52 +3980,82 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:224
+; ALIGNED-NEXT: buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:225
+; ALIGNED-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:226
+; ALIGNED-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:227
+; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:228
+; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:229
+; ALIGNED-NEXT: buffer_load_ubyte v34, v2, s[0:3], 0 offen offset:230
+; ALIGNED-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:231
+; ALIGNED-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x4
+; ALIGNED-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:236
+; ALIGNED-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:237
+; ALIGNED-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:238
+; ALIGNED-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:239
+; ALIGNED-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:235
+; ALIGNED-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x2
+; ALIGNED-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:232
+; ALIGNED-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:233
+; ALIGNED-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:234
+; ALIGNED-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:240
+; ALIGNED-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:241
+; ALIGNED-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:242
+; ALIGNED-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:243
+; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:244
+; ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:245
+; ALIGNED-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:246
+; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:247
; ALIGNED-NEXT: buffer_store_dword v66, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v67, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v64, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v65, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v68, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v69, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v70, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v71, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v80, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v126, off, s[0:3], s32 offset:1188 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v81, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:87
-; ALIGNED-NEXT: s_waitcnt vmcnt(7)
+; ALIGNED-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(42)
; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
+; ALIGNED-NEXT: s_waitcnt vmcnt(41)
+; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(39)
; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(5)
-; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; ALIGNED-NEXT: s_waitcnt vmcnt(38)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: s_waitcnt vmcnt(36)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
+; ALIGNED-NEXT: s_waitcnt vmcnt(35)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v81, 8, v3
; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:84
+; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x4
+; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:212
+; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:213
+; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:214
+; ALIGNED-NEXT: buffer_load_ubyte v65, v2, s[0:3], 0 offen offset:215
+; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:211
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:81
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:80
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: s_waitcnt vmcnt(8)
+; ALIGNED-NEXT: v_lshl_or_b32 v73, v13, 8, v16
+; ALIGNED-NEXT: s_waitcnt vmcnt(7)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
@@ -4251,259 +4327,132 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1072 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v7
+; ALIGNED-NEXT: s_clause 0x4
+; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:252
+; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:253
+; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:254
+; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:255
+; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:251
+; ALIGNED-NEXT: v_lshl_or_b32 v4, v102, 8, v101
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v8, 8, v6
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1136 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v5, 8, v3
+; ALIGNED-NEXT: s_clause 0x3
; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:158
+; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:248
+; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:249
+; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:250
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:157
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1148 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:156
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: s_waitcnt vmcnt(7)
+; ALIGNED-NEXT: v_lshl_or_b32 v77, v9, 8, v10
+; ALIGNED-NEXT: s_waitcnt vmcnt(5)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1160 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1152 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1144 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v127, v2, s[0:3], 0 offen offset:159
-; ALIGNED-NEXT: buffer_load_ubyte v124, v2, s[0:3], 0 offen offset:155
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v127, 8, v3
+; ALIGNED-NEXT: v_lshl_or_b32 v3, v113, 8, v116
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1172 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:152
-; ALIGNED-NEXT: buffer_load_ubyte v123, v2, s[0:3], 0 offen offset:153
-; ALIGNED-NEXT: buffer_load_ubyte v111, v2, s[0:3], 0 offen offset:154
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v123, 8, v125
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v124, 8, v111
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1180 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x7
-; ALIGNED-NEXT: buffer_load_ubyte v110, v2, s[0:3], 0 offen offset:160
-; ALIGNED-NEXT: buffer_load_ubyte v105, v2, s[0:3], 0 offen offset:161
-; ALIGNED-NEXT: buffer_load_ubyte v94, v2, s[0:3], 0 offen offset:162
-; ALIGNED-NEXT: buffer_load_ubyte v93, v2, s[0:3], 0 offen offset:163
-; ALIGNED-NEXT: buffer_load_ubyte v108, v2, s[0:3], 0 offen offset:164
-; ALIGNED-NEXT: buffer_load_ubyte v95, v2, s[0:3], 0 offen offset:165
-; ALIGNED-NEXT: buffer_load_ubyte v104, v2, s[0:3], 0 offen offset:166
-; ALIGNED-NEXT: buffer_load_ubyte v92, v2, s[0:3], 0 offen offset:167
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v105, 8, v110
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v93, 8, v94
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v92, 8, v104
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1192 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v95, 8, v108
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1196 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x4
-; ALIGNED-NEXT: buffer_load_ubyte v90, v2, s[0:3], 0 offen offset:172
-; ALIGNED-NEXT: buffer_load_ubyte v79, v2, s[0:3], 0 offen offset:173
-; ALIGNED-NEXT: buffer_load_ubyte v88, v2, s[0:3], 0 offen offset:174
-; ALIGNED-NEXT: buffer_load_ubyte v78, v2, s[0:3], 0 offen offset:175
-; ALIGNED-NEXT: buffer_load_ubyte v75, v2, s[0:3], 0 offen offset:171
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v79, 8, v90
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v78, 8, v88
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1200 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v76, v2, s[0:3], 0 offen offset:168
-; ALIGNED-NEXT: buffer_load_ubyte v72, v2, s[0:3], 0 offen offset:169
-; ALIGNED-NEXT: buffer_load_ubyte v63, v2, s[0:3], 0 offen offset:170
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v72, 8, v76
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v75, 8, v63
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1204 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x7
-; ALIGNED-NEXT: buffer_load_ubyte v62, v2, s[0:3], 0 offen offset:176
-; ALIGNED-NEXT: buffer_load_ubyte v60, v2, s[0:3], 0 offen offset:177
-; ALIGNED-NEXT: buffer_load_ubyte v57, v2, s[0:3], 0 offen offset:178
-; ALIGNED-NEXT: buffer_load_ubyte v56, v2, s[0:3], 0 offen offset:179
-; ALIGNED-NEXT: buffer_load_ubyte v61, v2, s[0:3], 0 offen offset:180
-; ALIGNED-NEXT: buffer_load_ubyte v59, v2, s[0:3], 0 offen offset:181
-; ALIGNED-NEXT: buffer_load_ubyte v58, v2, s[0:3], 0 offen offset:182
-; ALIGNED-NEXT: buffer_load_ubyte v47, v2, s[0:3], 0 offen offset:183
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v60, 8, v62
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v56, 8, v57
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v47, 8, v58
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1208 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v59, 8, v61
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1212 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x4
-; ALIGNED-NEXT: buffer_load_ubyte v45, v2, s[0:3], 0 offen offset:188
-; ALIGNED-NEXT: buffer_load_ubyte v43, v2, s[0:3], 0 offen offset:189
-; ALIGNED-NEXT: buffer_load_ubyte v44, v2, s[0:3], 0 offen offset:190
-; ALIGNED-NEXT: buffer_load_ubyte v42, v2, s[0:3], 0 offen offset:191
-; ALIGNED-NEXT: buffer_load_ubyte v40, v2, s[0:3], 0 offen offset:187
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v45
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v42, 8, v44
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1216 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v41, v2, s[0:3], 0 offen offset:184
-; ALIGNED-NEXT: buffer_load_ubyte v119, v2, s[0:3], 0 offen offset:185
-; ALIGNED-NEXT: buffer_load_ubyte v118, v2, s[0:3], 0 offen offset:186
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v119, 8, v41
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v40, 8, v118
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1220 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x3e
-; ALIGNED-NEXT: buffer_load_ubyte v116, v2, s[0:3], 0 offen offset:192
-; ALIGNED-NEXT: buffer_load_ubyte v113, v2, s[0:3], 0 offen offset:193
-; ALIGNED-NEXT: buffer_load_ubyte v101, v2, s[0:3], 0 offen offset:194
-; ALIGNED-NEXT: buffer_load_ubyte v102, v2, s[0:3], 0 offen offset:195
-; ALIGNED-NEXT: buffer_load_ubyte v114, v2, s[0:3], 0 offen offset:196
-; ALIGNED-NEXT: buffer_load_ubyte v103, v2, s[0:3], 0 offen offset:197
-; ALIGNED-NEXT: buffer_load_ubyte v112, v2, s[0:3], 0 offen offset:198
-; ALIGNED-NEXT: buffer_load_ubyte v100, v2, s[0:3], 0 offen offset:199
-; ALIGNED-NEXT: buffer_load_ubyte v98, v2, s[0:3], 0 offen offset:204
-; ALIGNED-NEXT: buffer_load_ubyte v97, v2, s[0:3], 0 offen offset:205
-; ALIGNED-NEXT: buffer_load_ubyte v96, v2, s[0:3], 0 offen offset:206
-; ALIGNED-NEXT: buffer_load_ubyte v87, v2, s[0:3], 0 offen offset:207
-; ALIGNED-NEXT: buffer_load_ubyte v85, v2, s[0:3], 0 offen offset:203
-; ALIGNED-NEXT: buffer_load_ubyte v86, v2, s[0:3], 0 offen offset:200
-; ALIGNED-NEXT: buffer_load_ubyte v84, v2, s[0:3], 0 offen offset:201
-; ALIGNED-NEXT: buffer_load_ubyte v83, v2, s[0:3], 0 offen offset:202
-; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:212
-; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:213
-; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:214
-; ALIGNED-NEXT: buffer_load_ubyte v65, v2, s[0:3], 0 offen offset:215
-; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:211
-; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:216
-; ALIGNED-NEXT: buffer_load_ubyte v67, v2, s[0:3], 0 offen offset:217
-; ALIGNED-NEXT: buffer_load_ubyte v53, v2, s[0:3], 0 offen offset:218
-; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:219
-; ALIGNED-NEXT: buffer_load_ubyte v70, v2, s[0:3], 0 offen offset:220
-; ALIGNED-NEXT: buffer_load_ubyte v55, v2, s[0:3], 0 offen offset:221
-; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:222
-; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:223
-; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:208
-; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:209
-; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:210
-; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:224
-; ALIGNED-NEXT: buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:225
-; ALIGNED-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:226
-; ALIGNED-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:227
-; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:228
-; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:229
-; ALIGNED-NEXT: buffer_load_ubyte v34, v2, s[0:3], 0 offen offset:230
-; ALIGNED-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:231
-; ALIGNED-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:236
-; ALIGNED-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:237
-; ALIGNED-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:238
-; ALIGNED-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:239
-; ALIGNED-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:235
-; ALIGNED-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:232
-; ALIGNED-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:233
-; ALIGNED-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:234
-; ALIGNED-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:240
-; ALIGNED-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:241
-; ALIGNED-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:242
-; ALIGNED-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:243
-; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:244
-; ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:245
-; ALIGNED-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:246
-; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:247
-; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:252
-; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:253
-; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:254
-; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:255
-; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:251
-; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:248
-; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:249
-; ALIGNED-NEXT: s_clause 0x6
-; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:250
-; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen
-; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:2
-; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:4
-; ALIGNED-NEXT: buffer_load_ubyte v109, v2, s[0:3], 0 offen offset:5
-; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:6
-; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:7
-; ALIGNED-NEXT: s_waitcnt vmcnt(62)
-; ALIGNED-NEXT: v_lshl_or_b32 v3, v113, 8, v116
-; ALIGNED-NEXT: v_lshl_or_b32 v4, v102, 8, v101
; ALIGNED-NEXT: v_lshl_or_b32 v106, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v103, 8, v114
; ALIGNED-NEXT: v_lshl_or_b32 v4, v100, 8, v112
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1172 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v123, 8, v125
; ALIGNED-NEXT: v_lshl_or_b32 v91, v4, 16, v3
-; ALIGNED-NEXT: s_waitcnt vmcnt(60)
; ALIGNED-NEXT: v_lshl_or_b32 v3, v97, 8, v98
-; ALIGNED-NEXT: s_waitcnt vmcnt(58)
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v93, 8, v94
; ALIGNED-NEXT: v_lshl_or_b32 v4, v87, 8, v96
-; ALIGNED-NEXT: s_waitcnt vmcnt(14)
-; ALIGNED-NEXT: v_lshl_or_b32 v73, v13, 8, v16
-; ALIGNED-NEXT: s_waitcnt vmcnt(10)
-; ALIGNED-NEXT: v_lshl_or_b32 v77, v9, 8, v10
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: buffer_store_dword v107, off, s[0:3], s32 offset:1088 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1180 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v105, 8, v110
; ALIGNED-NEXT: v_lshl_or_b32 v89, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v84, 8, v86
; ALIGNED-NEXT: v_lshl_or_b32 v4, v85, 8, v83
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1112 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v109, off, s[0:3], s32 offset:1096 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v120, off, s[0:3], s32 offset:1132 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v92, 8, v104
; ALIGNED-NEXT: v_lshl_or_b32 v74, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v68, 8, v81
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1192 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v95, 8, v108
; ALIGNED-NEXT: v_lshl_or_b32 v4, v65, 8, v71
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v78, 8, v88
; ALIGNED-NEXT: v_lshl_or_b32 v46, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v67, 8, v80
; ALIGNED-NEXT: v_lshl_or_b32 v4, v54, 8, v53
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1196 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v79, 8, v90
; ALIGNED-NEXT: v_lshl_or_b32 v117, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v55, 8, v70
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v75, 8, v63
; ALIGNED-NEXT: v_lshl_or_b32 v4, v52, 8, v64
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1200 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v72, 8, v76
; ALIGNED-NEXT: v_lshl_or_b32 v115, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v39, 8, v50
; ALIGNED-NEXT: v_lshl_or_b32 v4, v69, 8, v48
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v56, 8, v57
; ALIGNED-NEXT: v_lshl_or_b32 v99, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v36, 8, v38
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1204 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v60, 8, v62
; ALIGNED-NEXT: v_lshl_or_b32 v4, v32, 8, v33
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: v_lshl_or_b32 v82, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v35, 8, v37
; ALIGNED-NEXT: v_lshl_or_b32 v4, v31, 8, v34
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v47, 8, v58
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1208 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v59, 8, v61
; ALIGNED-NEXT: v_lshl_or_b32 v66, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v29, 8, v30
; ALIGNED-NEXT: v_lshl_or_b32 v4, v26, 8, v28
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v42, 8, v44
; ALIGNED-NEXT: v_lshl_or_b32 v51, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v23, 8, v24
; ALIGNED-NEXT: v_lshl_or_b32 v4, v25, 8, v21
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1212 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v45
; ALIGNED-NEXT: v_lshl_or_b32 v49, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v18, 8, v20
; ALIGNED-NEXT: v_lshl_or_b32 v4, v14, 8, v15
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v40, 8, v118
; ALIGNED-NEXT: v_lshl_or_b32 v27, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v4, v17, 8, v19
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1216 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v119, 8, v41
; ALIGNED-NEXT: v_lshl_or_b32 v22, v73, 16, v4
; ALIGNED-NEXT: v_lshl_or_b32 v73, v11, 8, v12
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:7
+; ALIGNED-NEXT: buffer_store_dword v107, off, s[0:3], s32 offset:1088 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v109, off, s[0:3], s32 offset:1096 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v4, v77, 16, v73
; ALIGNED-NEXT: v_lshl_or_b32 v73, v6, 8, v8
; ALIGNED-NEXT: v_lshl_or_b32 v77, v7, 8, v5
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1220 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen
+; ALIGNED-NEXT: buffer_store_dword v120, off, s[0:3], s32 offset:1132 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v3, v77, 16, v73
; ALIGNED-NEXT: s_clause 0x1
; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:1
; ALIGNED-NEXT: buffer_load_ubyte v77, v2, s[0:3], 0 offen offset:3
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1068 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v121, off, s[0:3], s32 offset:1092 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1112 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1068 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: buffer_store_dword v73, off, s[0:3], s32 offset:1076 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
@@ -4513,46 +4462,44 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v77, 16, v73
; ALIGNED-NEXT: v_lshl_or_b32 v73, v109, 8, v107
; ALIGNED-NEXT: v_lshl_or_b32 v77, v1, 8, v120
+; ALIGNED-NEXT: s_clause 0x2
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:12
+; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:15
+; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:11
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1128 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x2
+; ALIGNED-NEXT: buffer_load_ubyte v122, v2, s[0:3], 0 offen offset:8
+; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:9
+; ALIGNED-NEXT: buffer_load_ubyte v109, v2, s[0:3], 0 offen offset:10
; ALIGNED-NEXT: v_lshl_or_b32 v0, v77, 16, v73
; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:13
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1140 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:14
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: buffer_store_dword v126, off, s[0:3], s32 offset:1188 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(7)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1156 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: buffer_store_dword v73, off, s[0:3], s32 offset:1168 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v73, v73, 8, v1
+; ALIGNED-NEXT: v_mov_b32_e32 v1, v107
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1164 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:15
-; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:11
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: v_lshl_or_b32 v77, v107, 8, v0
-; ALIGNED-NEXT: v_mov_b32_e32 v1, v107
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1164 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v77, 16, v73
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1176 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v122, v2, s[0:3], 0 offen offset:8
-; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:9
-; ALIGNED-NEXT: buffer_load_ubyte v109, v2, s[0:3], 0 offen offset:10
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: v_lshl_or_b32 v73, v120, 8, v122
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: v_lshl_or_b32 v77, v121, 8, v109
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1176 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v77, 16, v73
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1184 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_clause 0x2
; ALIGNED-NEXT: buffer_load_ubyte v77, v2, s[0:3], 0 offen offset:18
; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:16
; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:17
+; ALIGNED-NEXT: v_add_nc_u32_e32 v2, 0x100, v2
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1184 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:232
; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236
; ALIGNED-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:228
; ALIGNED-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:224
-; ALIGNED-NEXT: v_add_nc_u32_e32 v2, 0x100, v2
; ALIGNED-NEXT: s_waitcnt vmcnt(2)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v126, 8, v77
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
@@ -4625,6 +4572,7 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_store_dword v89, off, s[0:3], s32 offset:220
; ALIGNED-NEXT: buffer_store_dword v91, off, s[0:3], s32 offset:212
; ALIGNED-NEXT: buffer_store_dword v106, off, s[0:3], s32 offset:208
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1220 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v83 offset:202
; ALIGNED-NEXT: flat_store_byte v[3:4], v85 offset:203
; ALIGNED-NEXT: flat_store_byte v[3:4], v84 offset:201
@@ -4641,7 +4589,6 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: flat_store_byte v[3:4], v112 offset:198
; ALIGNED-NEXT: flat_store_byte v[3:4], v114 offset:196
; ALIGNED-NEXT: flat_store_byte v[3:4], v116 offset:192
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1220 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_addc_u32 s5, s5, 0
; ALIGNED-NEXT: v_cmp_gt_u64_e64 s6, 0x800, s[4:5]
; ALIGNED-NEXT: s_and_b32 vcc_lo, exec_lo, s6
@@ -4656,6 +4603,7 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1208 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1204 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v118 offset:186
; ALIGNED-NEXT: flat_store_byte v[3:4], v40 offset:187
; ALIGNED-NEXT: flat_store_byte v[3:4], v119 offset:185
@@ -4672,7 +4620,6 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: flat_store_byte v[3:4], v58 offset:182
; ALIGNED-NEXT: flat_store_byte v[3:4], v61 offset:180
; ALIGNED-NEXT: flat_store_byte v[3:4], v62 offset:176
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1204 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1200 ; 4-byte Folded Reload
@@ -4684,6 +4631,7 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1192 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1180 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v63 offset:170
; ALIGNED-NEXT: flat_store_byte v[3:4], v75 offset:171
; ALIGNED-NEXT: flat_store_byte v[3:4], v72 offset:169
@@ -4700,7 +4648,6 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: flat_store_byte v[3:4], v104 offset:166
; ALIGNED-NEXT: flat_store_byte v[3:4], v108 offset:164
; ALIGNED-NEXT: flat_store_byte v[3:4], v110 offset:160
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1180 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1172 ; 4-byte Folded Reload
@@ -4712,11 +4659,11 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1136 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1152 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v111 offset:154
; ALIGNED-NEXT: flat_store_byte v[3:4], v124 offset:155
; ALIGNED-NEXT: flat_store_byte v[3:4], v123 offset:153
; ALIGNED-NEXT: flat_store_byte v[3:4], v127 offset:159
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1152 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:157
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1160 ; 4-byte Folded Reload
@@ -5234,9 +5181,9 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1128 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1168 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v109 offset:10
; ALIGNED-NEXT: flat_store_byte v[3:4], v121 offset:11
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1168 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:13
; ALIGNED-NEXT: flat_store_byte v[3:4], v120 offset:9
@@ -5274,7 +5221,7 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5)
; ALIGNED-NEXT: flat_store_byte v[3:4], v0
; ALIGNED-NEXT: s_cbranch_vccnz .LBB4_1
; ALIGNED-NEXT: ; %bb.2: ; %memcpy-split
-; ALIGNED-NEXT: s_clause 0x2f
+; ALIGNED-NEXT: s_clause 0x2f ; 192-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v127, off, s[0:3], s32
; ALIGNED-NEXT: buffer_load_dword v126, off, s[0:3], s32 offset:4
; ALIGNED-NEXT: buffer_load_dword v125, off, s[0:3], s32 offset:8
@@ -6797,7 +6744,7 @@ define void @memmove_p0_p0_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(0
; ALIGNED-NEXT: s_cbranch_scc0 .LBB5_5
; ALIGNED-NEXT: .LBB5_6: ; %Flow6
; ALIGNED-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v47, off, s[0:3], s32
; ALIGNED-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4
; ALIGNED-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8
@@ -8296,7 +8243,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; ALIGNED-NEXT: s_cbranch_scc0 .LBB6_5
; ALIGNED-NEXT: .LBB6_6: ; %Flow8
; ALIGNED-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: s_clause 0x7 ; 32-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v47, off, s[0:3], s32
; ALIGNED-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4
; ALIGNED-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8
@@ -8848,14 +8795,6 @@ define void @memmove_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4
; ALIGNED-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:60
; ALIGNED-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:52
; ALIGNED-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:48
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v30 offset:106
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v31 offset:110
-; ALIGNED-NEXT: flat_store_byte v[96:97], v31 offset:108
-; ALIGNED-NEXT: flat_store_byte v[96:97], v30 offset:104
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v29 offset:102
-; ALIGNED-NEXT: flat_store_byte v[96:97], v29 offset:100
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v28 offset:98
-; ALIGNED-NEXT: flat_store_byte v[96:97], v28 offset:96
; ALIGNED-NEXT: flat_store_byte v[96:97], v70 offset:111
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: v_lshrrev_b32_e32 v70, 24, v6
@@ -8871,6 +8810,14 @@ define void @memmove_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4
; ALIGNED-NEXT: v_lshrrev_b32_e32 v36, 24, v23
; ALIGNED-NEXT: v_lshrrev_b32_e32 v113, 24, v19
; ALIGNED-NEXT: v_lshrrev_b32_e32 v86, 24, v15
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v30 offset:106
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v31 offset:110
+; ALIGNED-NEXT: flat_store_byte v[96:97], v31 offset:108
+; ALIGNED-NEXT: flat_store_byte v[96:97], v30 offset:104
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v29 offset:102
+; ALIGNED-NEXT: flat_store_byte v[96:97], v29 offset:100
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v28 offset:98
+; ALIGNED-NEXT: flat_store_byte v[96:97], v28 offset:96
; ALIGNED-NEXT: v_lshrrev_b32_e32 v28, 24, v11
; ALIGNED-NEXT: flat_store_byte v[96:97], v98 offset:103
; ALIGNED-NEXT: v_lshrrev_b32_e32 v98, 24, v7
@@ -9297,6 +9244,10 @@ define void @memmove_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4
; ALIGNED-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:476
; ALIGNED-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:468
; ALIGNED-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:464
+; ALIGNED-NEXT: flat_store_byte v[96:97], v82 offset:143
+; ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; ALIGNED-NEXT: v_lshrrev_b32_e32 v82, 24, v18
+; ALIGNED-NEXT: v_lshrrev_b32_e32 v51, 8, v26
; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v38 offset:138
; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v39 offset:142
; ALIGNED-NEXT: flat_store_byte v[96:97], v39 offset:140
@@ -9305,10 +9256,6 @@ define void @memmove_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4
; ALIGNED-NEXT: flat_store_byte v[96:97], v37 offset:132
; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v36 offset:130
; ALIGNED-NEXT: flat_store_byte v[96:97], v36 offset:128
-; ALIGNED-NEXT: flat_store_byte v[96:97], v82 offset:143
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: v_lshrrev_b32_e32 v82, 24, v18
-; ALIGNED-NEXT: v_lshrrev_b32_e32 v51, 8, v26
; ALIGNED-NEXT: flat_store_byte v[96:97], v66 offset:139
; ALIGNED-NEXT: flat_store_byte v[96:97], v67 offset:137
; ALIGNED-NEXT: flat_store_byte v[96:97], v83 offset:141
@@ -9344,14 +9291,6 @@ define void @memmove_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4
; ALIGNED-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:316
; ALIGNED-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:308
; ALIGNED-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:304
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v30 offset:106
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v31 offset:110
-; ALIGNED-NEXT: flat_store_byte v[96:97], v31 offset:108
-; ALIGNED-NEXT: flat_store_byte v[96:97], v30 offset:104
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v29 offset:102
-; ALIGNED-NEXT: flat_store_byte v[96:97], v29 offset:100
-; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v28 offset:98
-; ALIGNED-NEXT: flat_store_byte v[96:97], v28 offset:96
; ALIGNED-NEXT: flat_store_byte v[96:97], v69 offset:111
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: v_lshrrev_b32_e32 v69, 24, v6
@@ -9366,6 +9305,14 @@ define void @memmove_p0_p4_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(4
; ALIGNED-NEXT: v_lshrrev_b32_e32 v101, 24, v25
; ALIGNED-NEXT: v_lshrrev_b32_e32 v99, 24, v19
; ALIGNED-NEXT: v_lshrrev_b32_e32 v86, 24, v15
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v30 offset:106
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v31 offset:110
+; ALIGNED-NEXT: flat_store_byte v[96:97], v31 offset:108
+; ALIGNED-NEXT: flat_store_byte v[96:97], v30 offset:104
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v29 offset:102
+; ALIGNED-NEXT: flat_store_byte v[96:97], v29 offset:100
+; ALIGNED-NEXT: flat_store_byte_d16_hi v[96:97], v28 offset:98
+; ALIGNED-NEXT: flat_store_byte v[96:97], v28 offset:96
; ALIGNED-NEXT: v_lshrrev_b32_e32 v28, 24, v11
; ALIGNED-NEXT: flat_store_byte v[96:97], v71 offset:103
; ALIGNED-NEXT: v_lshrrev_b32_e32 v71, 24, v7
@@ -12198,7 +12145,7 @@ define void @memmove_p5_p5_sz2048(ptr addrspace(5) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: s_cbranch_scc0 .LBB8_5
; ALIGNED-NEXT: .LBB8_6: ; %Flow19
; ALIGNED-NEXT: s_or_b32 exec_lo, exec_lo, s6
-; ALIGNED-NEXT: s_clause 0x2f
+; ALIGNED-NEXT: s_clause 0x2f ; 192-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v127, off, s[0:3], s32
; ALIGNED-NEXT: buffer_load_dword v126, off, s[0:3], s32 offset:4
; ALIGNED-NEXT: buffer_load_dword v125, off, s[0:3], s32 offset:8
@@ -12645,6 +12592,11 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-LABEL: memmove_p0_p5_sz2048:
; ALIGNED: ; %bb.0: ; %entry
; ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ALIGNED-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_mov_b64 s[4:5], 0
+; ALIGNED-NEXT: s_mov_b32 s6, exec_lo
; ALIGNED-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
@@ -12693,34 +12645,29 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_store_dword v125, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v126, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v127, off, s[0:3], s32 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_mov_b64 s[4:5], 0
-; ALIGNED-NEXT: s_mov_b32 s6, exec_lo
; ALIGNED-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
; ALIGNED-NEXT: v_cmpx_ge_u32_e64 v2, v0
; ALIGNED-NEXT: s_xor_b32 s6, exec_lo, s6
; ALIGNED-NEXT: s_cbranch_execz .LBB9_2
; ALIGNED-NEXT: .LBB9_1: ; %memmove_fwd_loop
; ALIGNED-NEXT: ; =>This Inner Loop Header: Depth=1
-; ALIGNED-NEXT: s_clause 0x39
+; ALIGNED-NEXT: s_clause 0x3e
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:20
+; ALIGNED-NEXT: buffer_load_ubyte v127, v2, s[0:3], 0 offen offset:19
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:21
; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:22
; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:23
+; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:28
; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:24
; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:25
; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:26
-; ALIGNED-NEXT: buffer_load_ubyte v127, v2, s[0:3], 0 offen offset:19
-; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:28
+; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:27
; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:29
; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:30
; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:31
; ALIGNED-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:32
; ALIGNED-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:33
; ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:34
-; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:27
; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:35
; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:36
; ALIGNED-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:37
@@ -12742,17 +12689,17 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:53
; ALIGNED-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:54
; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:55
+; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:60
; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:56
; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:57
; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:58
-; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:60
+; ALIGNED-NEXT: buffer_load_ubyte v51, v2, s[0:3], 0 offen offset:59
; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:61
; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:62
; ALIGNED-NEXT: buffer_load_ubyte v49, v2, s[0:3], 0 offen offset:63
; ALIGNED-NEXT: buffer_load_ubyte v53, v2, s[0:3], 0 offen offset:64
; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:65
; ALIGNED-NEXT: buffer_load_ubyte v65, v2, s[0:3], 0 offen offset:66
-; ALIGNED-NEXT: buffer_load_ubyte v51, v2, s[0:3], 0 offen offset:59
; ALIGNED-NEXT: buffer_load_ubyte v55, v2, s[0:3], 0 offen offset:67
; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:68
; ALIGNED-NEXT: buffer_load_ubyte v66, v2, s[0:3], 0 offen offset:69
@@ -12760,58 +12707,94 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:71
; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:76
; ALIGNED-NEXT: buffer_load_ubyte v70, v2, s[0:3], 0 offen offset:77
-; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:75
; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:78
; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:79
-; ALIGNED-NEXT: s_waitcnt vmcnt(57)
+; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:75
+; ALIGNED-NEXT: buffer_load_ubyte v126, v2, s[0:3], 0 offen offset:159
+; ALIGNED-NEXT: buffer_load_ubyte v124, v2, s[0:3], 0 offen offset:155
+; ALIGNED-NEXT: buffer_load_ubyte v123, v2, s[0:3], 0 offen offset:152
+; ALIGNED-NEXT: buffer_load_ubyte v122, v2, s[0:3], 0 offen offset:153
+; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:154
+; ALIGNED-NEXT: s_clause 0x30
+; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:160
+; ALIGNED-NEXT: buffer_load_ubyte v108, v2, s[0:3], 0 offen offset:161
+; ALIGNED-NEXT: buffer_load_ubyte v104, v2, s[0:3], 0 offen offset:162
+; ALIGNED-NEXT: buffer_load_ubyte v105, v2, s[0:3], 0 offen offset:163
+; ALIGNED-NEXT: buffer_load_ubyte v111, v2, s[0:3], 0 offen offset:164
+; ALIGNED-NEXT: buffer_load_ubyte v106, v2, s[0:3], 0 offen offset:165
+; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:166
+; ALIGNED-NEXT: buffer_load_ubyte v94, v2, s[0:3], 0 offen offset:167
+; ALIGNED-NEXT: buffer_load_ubyte v40, v2, s[0:3], 0 offen offset:192
+; ALIGNED-NEXT: buffer_load_ubyte v117, v2, s[0:3], 0 offen offset:193
+; ALIGNED-NEXT: buffer_load_ubyte v113, v2, s[0:3], 0 offen offset:194
+; ALIGNED-NEXT: buffer_load_ubyte v114, v2, s[0:3], 0 offen offset:195
+; ALIGNED-NEXT: buffer_load_ubyte v118, v2, s[0:3], 0 offen offset:196
+; ALIGNED-NEXT: buffer_load_ubyte v115, v2, s[0:3], 0 offen offset:197
+; ALIGNED-NEXT: buffer_load_ubyte v116, v2, s[0:3], 0 offen offset:198
+; ALIGNED-NEXT: buffer_load_ubyte v112, v2, s[0:3], 0 offen offset:199
+; ALIGNED-NEXT: buffer_load_ubyte v102, v2, s[0:3], 0 offen offset:204
+; ALIGNED-NEXT: buffer_load_ubyte v101, v2, s[0:3], 0 offen offset:205
+; ALIGNED-NEXT: buffer_load_ubyte v100, v2, s[0:3], 0 offen offset:206
+; ALIGNED-NEXT: buffer_load_ubyte v99, v2, s[0:3], 0 offen offset:207
+; ALIGNED-NEXT: buffer_load_ubyte v97, v2, s[0:3], 0 offen offset:203
+; ALIGNED-NEXT: buffer_load_ubyte v98, v2, s[0:3], 0 offen offset:200
+; ALIGNED-NEXT: buffer_load_ubyte v96, v2, s[0:3], 0 offen offset:201
+; ALIGNED-NEXT: buffer_load_ubyte v87, v2, s[0:3], 0 offen offset:202
+; ALIGNED-NEXT: buffer_load_ubyte v92, v2, s[0:3], 0 offen offset:172
+; ALIGNED-NEXT: buffer_load_ubyte v89, v2, s[0:3], 0 offen offset:173
+; ALIGNED-NEXT: buffer_load_ubyte v90, v2, s[0:3], 0 offen offset:174
+; ALIGNED-NEXT: buffer_load_ubyte v88, v2, s[0:3], 0 offen offset:175
+; ALIGNED-NEXT: buffer_load_ubyte v78, v2, s[0:3], 0 offen offset:171
+; ALIGNED-NEXT: buffer_load_ubyte v79, v2, s[0:3], 0 offen offset:168
+; ALIGNED-NEXT: buffer_load_ubyte v76, v2, s[0:3], 0 offen offset:169
+; ALIGNED-NEXT: buffer_load_ubyte v75, v2, s[0:3], 0 offen offset:170
+; ALIGNED-NEXT: buffer_load_ubyte v74, v2, s[0:3], 0 offen offset:176
+; ALIGNED-NEXT: buffer_load_ubyte v72, v2, s[0:3], 0 offen offset:177
+; ALIGNED-NEXT: buffer_load_ubyte v61, v2, s[0:3], 0 offen offset:178
+; ALIGNED-NEXT: buffer_load_ubyte v60, v2, s[0:3], 0 offen offset:179
+; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:180
+; ALIGNED-NEXT: buffer_load_ubyte v63, v2, s[0:3], 0 offen offset:181
+; ALIGNED-NEXT: buffer_load_ubyte v62, v2, s[0:3], 0 offen offset:182
+; ALIGNED-NEXT: buffer_load_ubyte v59, v2, s[0:3], 0 offen offset:183
+; ALIGNED-NEXT: buffer_load_ubyte v57, v2, s[0:3], 0 offen offset:188
+; ALIGNED-NEXT: buffer_load_ubyte v47, v2, s[0:3], 0 offen offset:189
+; ALIGNED-NEXT: buffer_load_ubyte v56, v2, s[0:3], 0 offen offset:190
+; ALIGNED-NEXT: buffer_load_ubyte v46, v2, s[0:3], 0 offen offset:191
+; ALIGNED-NEXT: buffer_load_ubyte v44, v2, s[0:3], 0 offen offset:187
+; ALIGNED-NEXT: buffer_load_ubyte v45, v2, s[0:3], 0 offen offset:184
+; ALIGNED-NEXT: buffer_load_ubyte v43, v2, s[0:3], 0 offen offset:185
+; ALIGNED-NEXT: buffer_load_ubyte v42, v2, s[0:3], 0 offen offset:186
+; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:4
+; ALIGNED-NEXT: s_waitcnt vmcnt(62)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(56)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(55)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(54)
; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(53)
+; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(52)
; ALIGNED-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(51)
; ALIGNED-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(49)
-; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(48)
+; ALIGNED-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(47)
; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(46)
; ALIGNED-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(45)
; ALIGNED-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v7, 8, v5
-; ALIGNED-NEXT: s_waitcnt vmcnt(42)
-; ALIGNED-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v4, v9, 8, v8
-; ALIGNED-NEXT: s_waitcnt vmcnt(40)
; ALIGNED-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v4, v9, 8, v8
; ALIGNED-NEXT: v_lshl_or_b32 v5, v10, 8, v6
; ALIGNED-NEXT: v_lshl_or_b32 v6, v11, 8, v12
; ALIGNED-NEXT: v_lshl_or_b32 v7, v15, 8, v14
; ALIGNED-NEXT: v_lshl_or_b32 v8, v19, 8, v17
-; ALIGNED-NEXT: s_waitcnt vmcnt(39)
; ALIGNED-NEXT: v_lshl_or_b32 v9, v16, 8, v13
-; ALIGNED-NEXT: s_waitcnt vmcnt(37)
; ALIGNED-NEXT: v_lshl_or_b32 v10, v20, 8, v18
-; ALIGNED-NEXT: s_waitcnt vmcnt(35)
; ALIGNED-NEXT: v_lshl_or_b32 v11, v23, 8, v22
-; ALIGNED-NEXT: s_waitcnt vmcnt(33)
; ALIGNED-NEXT: v_lshl_or_b32 v12, v27, 8, v25
-; ALIGNED-NEXT: s_waitcnt vmcnt(31)
; ALIGNED-NEXT: v_lshl_or_b32 v13, v24, 8, v21
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(29)
; ALIGNED-NEXT: v_lshl_or_b32 v14, v28, 8, v26
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v6, 16, v5
; ALIGNED-NEXT: v_lshl_or_b32 v4, v8, 16, v7
@@ -12819,82 +12802,81 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: v_lshl_or_b32 v6, v12, 16, v11
; ALIGNED-NEXT: v_lshl_or_b32 v7, v14, 16, v13
; ALIGNED-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(27)
; ALIGNED-NEXT: v_lshl_or_b32 v15, v30, 8, v29
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(25)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v32, 8, v34
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(23)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v36, 8, v31
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(21)
; ALIGNED-NEXT: v_lshl_or_b32 v3, v35, 8, v33
; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(16)
; ALIGNED-NEXT: v_lshl_or_b32 v4, v48, 8, v37
; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(14)
; ALIGNED-NEXT: v_lshl_or_b32 v5, v49, 8, v38
; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v6, v50, 8, v39
; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(10)
; ALIGNED-NEXT: v_lshl_or_b32 v7, v51, 8, v52
; ALIGNED-NEXT: v_lshl_or_b32 v0, v0, 16, v15
; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 16, v1
; ALIGNED-NEXT: v_lshl_or_b32 v3, v5, 16, v4
+; ALIGNED-NEXT: s_clause 0x1
; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:85
+; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:87
; ALIGNED-NEXT: v_lshl_or_b32 v4, v7, 16, v6
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v54, 8, v53
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(10)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v55, 8, v65
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(8)
+; ALIGNED-NEXT: s_waitcnt vmcnt(62)
; ALIGNED-NEXT: v_lshl_or_b32 v3, v66, 8, v64
; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
+; ALIGNED-NEXT: s_waitcnt vmcnt(61)
; ALIGNED-NEXT: v_lshl_or_b32 v4, v68, 8, v67
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:86
; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:82
-; ALIGNED-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:86
+; ALIGNED-NEXT: s_waitcnt vmcnt(62)
+; ALIGNED-NEXT: buffer_store_dword v69, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 16, v3
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
+; ALIGNED-NEXT: s_waitcnt vmcnt(61)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v70, 8, v69
; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:83
; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:74
+; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:83
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:984 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(5)
+; ALIGNED-NEXT: s_waitcnt vmcnt(61)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v80, 8, v71
-; ALIGNED-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v71, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v80, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x4
+; ALIGNED-NEXT: buffer_load_ubyte v85, v2, s[0:3], 0 offen offset:212
+; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:213
+; ALIGNED-NEXT: buffer_load_ubyte v83, v2, s[0:3], 0 offen offset:214
+; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:215
+; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:211
+; ALIGNED-NEXT: buffer_store_dword v65, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:73
-; ALIGNED-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v64, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v66, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v67, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:996 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:72
-; ALIGNED-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v68, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(62)
+; ALIGNED-NEXT: buffer_store_dword v81, off, s[0:3], s32 offset:1000 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x2
+; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:208
+; ALIGNED-NEXT: buffer_load_ubyte v51, v2, s[0:3], 0 offen offset:209
+; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:210
; ALIGNED-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
@@ -12902,47 +12884,97 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v65, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v64, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v66, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v67, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v68, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v69, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:224
+; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:225
+; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:226
+; ALIGNED-NEXT: buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:227
+; ALIGNED-NEXT: buffer_load_ubyte v49, v2, s[0:3], 0 offen offset:228
+; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:229
+; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:230
+; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:231
+; ALIGNED-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x4
+; ALIGNED-NEXT: buffer_load_ubyte v34, v2, s[0:3], 0 offen offset:236
+; ALIGNED-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:237
+; ALIGNED-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:238
+; ALIGNED-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:239
+; ALIGNED-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:235
+; ALIGNED-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x2
+; ALIGNED-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:232
+; ALIGNED-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:233
+; ALIGNED-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:234
+; ALIGNED-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0xc
+; ALIGNED-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:240
+; ALIGNED-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:241
+; ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:242
+; ALIGNED-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:243
+; ALIGNED-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:244
+; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:245
+; ALIGNED-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:246
+; ALIGNED-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:247
+; ALIGNED-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:252
+; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:253
+; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:254
+; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:255
+; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:251
; ALIGNED-NEXT: buffer_store_dword v70, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v71, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v80, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v81, off, s[0:3], s32 offset:1000 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v127, off, s[0:3], s32 offset:1412 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:87
-; ALIGNED-NEXT: s_waitcnt vmcnt(7)
+; ALIGNED-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(44)
; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1032 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
-; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1036 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(5)
+; ALIGNED-NEXT: s_waitcnt vmcnt(43)
+; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:1040 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(42)
; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:1024 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1020 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; ALIGNED-NEXT: s_waitcnt vmcnt(41)
+; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1036 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(40)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1004 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: s_waitcnt vmcnt(33)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:992 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
+; ALIGNED-NEXT: s_waitcnt vmcnt(32)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:988 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v81, 8, v3
; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:84
+; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1020 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: buffer_load_ubyte v84, v2, s[0:3], 0 offen offset:216
+; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:217
+; ALIGNED-NEXT: buffer_load_ubyte v65, v2, s[0:3], 0 offen offset:218
+; ALIGNED-NEXT: buffer_load_ubyte v66, v2, s[0:3], 0 offen offset:219
+; ALIGNED-NEXT: buffer_load_ubyte v82, v2, s[0:3], 0 offen offset:220
+; ALIGNED-NEXT: buffer_load_ubyte v67, v2, s[0:3], 0 offen offset:221
+; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:222
+; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:223
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:81
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1008 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:80
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:1040 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: s_waitcnt vmcnt(16)
+; ALIGNED-NEXT: v_lshl_or_b32 v95, v16, 8, v20
+; ALIGNED-NEXT: s_waitcnt vmcnt(12)
+; ALIGNED-NEXT: v_lshl_or_b32 v109, v11, 8, v12
+; ALIGNED-NEXT: s_waitcnt vmcnt(10)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1028 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1016 ; 4-byte Folded Spill
@@ -13214,289 +13246,158 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v7
+; ALIGNED-NEXT: v_lshl_or_b32 v4, v114, 8, v113
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v8, 8, v6
+; ALIGNED-NEXT: s_clause 0x3
+; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:248
+; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:249
+; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:250
+; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:6
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v5, 8, v3
+; ALIGNED-NEXT: s_clause 0x1
; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:158
+; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:5
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:157
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:156
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: s_waitcnt vmcnt(3)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1452 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1448 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v126, v2, s[0:3], 0 offen offset:159
-; ALIGNED-NEXT: buffer_load_ubyte v124, v2, s[0:3], 0 offen offset:155
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v126, 8, v3
+; ALIGNED-NEXT: v_lshl_or_b32 v3, v117, 8, v40
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v123, v2, s[0:3], 0 offen offset:152
-; ALIGNED-NEXT: buffer_load_ubyte v122, v2, s[0:3], 0 offen offset:153
-; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:154
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v122, 8, v123
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v124, 8, v121
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x7
-; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:160
-; ALIGNED-NEXT: buffer_load_ubyte v108, v2, s[0:3], 0 offen offset:161
-; ALIGNED-NEXT: buffer_load_ubyte v104, v2, s[0:3], 0 offen offset:162
-; ALIGNED-NEXT: buffer_load_ubyte v105, v2, s[0:3], 0 offen offset:163
-; ALIGNED-NEXT: buffer_load_ubyte v111, v2, s[0:3], 0 offen offset:164
-; ALIGNED-NEXT: buffer_load_ubyte v106, v2, s[0:3], 0 offen offset:165
-; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:166
-; ALIGNED-NEXT: buffer_load_ubyte v94, v2, s[0:3], 0 offen offset:167
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v108, 8, v120
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v105, 8, v104
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v94, 8, v107
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v106, 8, v111
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x4
-; ALIGNED-NEXT: buffer_load_ubyte v92, v2, s[0:3], 0 offen offset:172
-; ALIGNED-NEXT: buffer_load_ubyte v89, v2, s[0:3], 0 offen offset:173
-; ALIGNED-NEXT: buffer_load_ubyte v90, v2, s[0:3], 0 offen offset:174
-; ALIGNED-NEXT: buffer_load_ubyte v88, v2, s[0:3], 0 offen offset:175
-; ALIGNED-NEXT: buffer_load_ubyte v78, v2, s[0:3], 0 offen offset:171
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v89, 8, v92
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v88, 8, v90
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v79, v2, s[0:3], 0 offen offset:168
-; ALIGNED-NEXT: buffer_load_ubyte v76, v2, s[0:3], 0 offen offset:169
-; ALIGNED-NEXT: buffer_load_ubyte v75, v2, s[0:3], 0 offen offset:170
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 8, v79
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v78, 8, v75
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x7
-; ALIGNED-NEXT: buffer_load_ubyte v74, v2, s[0:3], 0 offen offset:176
-; ALIGNED-NEXT: buffer_load_ubyte v72, v2, s[0:3], 0 offen offset:177
-; ALIGNED-NEXT: buffer_load_ubyte v61, v2, s[0:3], 0 offen offset:178
-; ALIGNED-NEXT: buffer_load_ubyte v60, v2, s[0:3], 0 offen offset:179
-; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:180
-; ALIGNED-NEXT: buffer_load_ubyte v63, v2, s[0:3], 0 offen offset:181
-; ALIGNED-NEXT: buffer_load_ubyte v62, v2, s[0:3], 0 offen offset:182
-; ALIGNED-NEXT: buffer_load_ubyte v59, v2, s[0:3], 0 offen offset:183
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v72, 8, v74
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v60, 8, v61
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v59, 8, v62
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v63, 8, v73
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x4
-; ALIGNED-NEXT: buffer_load_ubyte v57, v2, s[0:3], 0 offen offset:188
-; ALIGNED-NEXT: buffer_load_ubyte v47, v2, s[0:3], 0 offen offset:189
-; ALIGNED-NEXT: buffer_load_ubyte v56, v2, s[0:3], 0 offen offset:190
-; ALIGNED-NEXT: buffer_load_ubyte v46, v2, s[0:3], 0 offen offset:191
-; ALIGNED-NEXT: buffer_load_ubyte v44, v2, s[0:3], 0 offen offset:187
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v47, 8, v57
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v46, 8, v56
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1488 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v45, v2, s[0:3], 0 offen offset:184
-; ALIGNED-NEXT: buffer_load_ubyte v43, v2, s[0:3], 0 offen offset:185
-; ALIGNED-NEXT: buffer_load_ubyte v42, v2, s[0:3], 0 offen offset:186
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v45
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v44, 8, v42
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1492 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x3e
-; ALIGNED-NEXT: buffer_load_ubyte v40, v2, s[0:3], 0 offen offset:192
-; ALIGNED-NEXT: buffer_load_ubyte v117, v2, s[0:3], 0 offen offset:193
-; ALIGNED-NEXT: buffer_load_ubyte v113, v2, s[0:3], 0 offen offset:194
-; ALIGNED-NEXT: buffer_load_ubyte v114, v2, s[0:3], 0 offen offset:195
-; ALIGNED-NEXT: buffer_load_ubyte v118, v2, s[0:3], 0 offen offset:196
-; ALIGNED-NEXT: buffer_load_ubyte v115, v2, s[0:3], 0 offen offset:197
-; ALIGNED-NEXT: buffer_load_ubyte v116, v2, s[0:3], 0 offen offset:198
-; ALIGNED-NEXT: buffer_load_ubyte v112, v2, s[0:3], 0 offen offset:199
-; ALIGNED-NEXT: buffer_load_ubyte v102, v2, s[0:3], 0 offen offset:204
-; ALIGNED-NEXT: buffer_load_ubyte v101, v2, s[0:3], 0 offen offset:205
-; ALIGNED-NEXT: buffer_load_ubyte v100, v2, s[0:3], 0 offen offset:206
-; ALIGNED-NEXT: buffer_load_ubyte v99, v2, s[0:3], 0 offen offset:207
-; ALIGNED-NEXT: buffer_load_ubyte v97, v2, s[0:3], 0 offen offset:203
-; ALIGNED-NEXT: buffer_load_ubyte v98, v2, s[0:3], 0 offen offset:200
-; ALIGNED-NEXT: buffer_load_ubyte v96, v2, s[0:3], 0 offen offset:201
-; ALIGNED-NEXT: buffer_load_ubyte v87, v2, s[0:3], 0 offen offset:202
-; ALIGNED-NEXT: buffer_load_ubyte v85, v2, s[0:3], 0 offen offset:212
-; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:213
-; ALIGNED-NEXT: buffer_load_ubyte v83, v2, s[0:3], 0 offen offset:214
-; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:215
-; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:211
-; ALIGNED-NEXT: buffer_load_ubyte v84, v2, s[0:3], 0 offen offset:216
-; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:217
-; ALIGNED-NEXT: buffer_load_ubyte v65, v2, s[0:3], 0 offen offset:218
-; ALIGNED-NEXT: buffer_load_ubyte v66, v2, s[0:3], 0 offen offset:219
-; ALIGNED-NEXT: buffer_load_ubyte v82, v2, s[0:3], 0 offen offset:220
-; ALIGNED-NEXT: buffer_load_ubyte v67, v2, s[0:3], 0 offen offset:221
-; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:222
-; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:223
-; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:208
-; ALIGNED-NEXT: buffer_load_ubyte v51, v2, s[0:3], 0 offen offset:209
-; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:210
-; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:224
-; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:225
-; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:226
-; ALIGNED-NEXT: buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:227
-; ALIGNED-NEXT: buffer_load_ubyte v49, v2, s[0:3], 0 offen offset:228
-; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:229
-; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:230
-; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:231
-; ALIGNED-NEXT: buffer_load_ubyte v34, v2, s[0:3], 0 offen offset:236
-; ALIGNED-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:237
-; ALIGNED-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:238
-; ALIGNED-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:239
-; ALIGNED-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:235
-; ALIGNED-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:232
-; ALIGNED-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:233
-; ALIGNED-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:234
-; ALIGNED-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:240
-; ALIGNED-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:241
-; ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:242
-; ALIGNED-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:243
-; ALIGNED-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:244
-; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:245
-; ALIGNED-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:246
-; ALIGNED-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:247
-; ALIGNED-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:252
-; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:253
-; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:254
-; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:255
-; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:251
-; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:248
-; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:249
-; ALIGNED-NEXT: s_clause 0x5
-; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:250
-; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:2
-; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:3
-; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:4
-; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:5
-; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:6
-; ALIGNED-NEXT: s_waitcnt vmcnt(62)
-; ALIGNED-NEXT: v_lshl_or_b32 v3, v117, 8, v40
-; ALIGNED-NEXT: v_lshl_or_b32 v4, v114, 8, v113
; ALIGNED-NEXT: v_lshl_or_b32 v110, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v115, 8, v118
-; ALIGNED-NEXT: s_waitcnt vmcnt(61)
; ALIGNED-NEXT: v_lshl_or_b32 v4, v112, 8, v116
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v122, 8, v123
; ALIGNED-NEXT: v_lshl_or_b32 v93, v4, 16, v3
-; ALIGNED-NEXT: s_waitcnt vmcnt(59)
; ALIGNED-NEXT: v_lshl_or_b32 v3, v101, 8, v102
-; ALIGNED-NEXT: s_waitcnt vmcnt(57)
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v105, 8, v104
; ALIGNED-NEXT: v_lshl_or_b32 v4, v99, 8, v100
-; ALIGNED-NEXT: s_waitcnt vmcnt(13)
-; ALIGNED-NEXT: v_lshl_or_b32 v95, v16, 8, v20
-; ALIGNED-NEXT: s_waitcnt vmcnt(9)
-; ALIGNED-NEXT: v_lshl_or_b32 v109, v11, 8, v12
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v108, 8, v120
; ALIGNED-NEXT: v_lshl_or_b32 v91, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v96, 8, v98
; ALIGNED-NEXT: v_lshl_or_b32 v4, v97, 8, v87
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v94, 8, v107
; ALIGNED-NEXT: v_lshl_or_b32 v77, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v71, 8, v85
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v106, 8, v111
; ALIGNED-NEXT: v_lshl_or_b32 v4, v69, 8, v83
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v88, 8, v90
; ALIGNED-NEXT: v_lshl_or_b32 v58, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v81, 8, v84
; ALIGNED-NEXT: v_lshl_or_b32 v4, v66, 8, v65
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v89, 8, v92
; ALIGNED-NEXT: v_lshl_or_b32 v41, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v67, 8, v82
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v78, 8, v75
; ALIGNED-NEXT: v_lshl_or_b32 v4, v64, 8, v68
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 8, v79
; ALIGNED-NEXT: v_lshl_or_b32 v119, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v51, 8, v54
; ALIGNED-NEXT: v_lshl_or_b32 v4, v80, 8, v52
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v60, 8, v61
; ALIGNED-NEXT: v_lshl_or_b32 v103, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v48, 8, v50
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v72, 8, v74
; ALIGNED-NEXT: v_lshl_or_b32 v4, v36, 8, v37
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: v_lshl_or_b32 v86, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v39, 8, v49
; ALIGNED-NEXT: v_lshl_or_b32 v4, v35, 8, v38
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v59, 8, v62
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v63, 8, v73
; ALIGNED-NEXT: v_lshl_or_b32 v70, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v33, 8, v34
; ALIGNED-NEXT: v_lshl_or_b32 v4, v30, 8, v32
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v46, 8, v56
; ALIGNED-NEXT: v_lshl_or_b32 v55, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v27, 8, v28
; ALIGNED-NEXT: v_lshl_or_b32 v4, v29, 8, v25
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v47, 8, v57
; ALIGNED-NEXT: v_lshl_or_b32 v53, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v3, v22, 8, v24
; ALIGNED-NEXT: v_lshl_or_b32 v4, v18, 8, v17
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v44, 8, v42
; ALIGNED-NEXT: v_lshl_or_b32 v31, v4, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v4, v19, 8, v23
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1488 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v45
; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen
; ALIGNED-NEXT: v_lshl_or_b32 v26, v95, 16, v4
; ALIGNED-NEXT: v_lshl_or_b32 v95, v13, 8, v14
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: s_clause 0x1
+; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:2
; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:7
-; ALIGNED-NEXT: s_waitcnt vmcnt(5)
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1292 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1300 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: buffer_store_dword v125, off, s[0:3], s32 offset:1296 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1332 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v21, v109, 16, v95
; ALIGNED-NEXT: v_lshl_or_b32 v95, v8, 8, v10
; ALIGNED-NEXT: v_lshl_or_b32 v109, v9, 8, v7
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1492 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:3
; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1304 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
-; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1332 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v125, off, s[0:3], s32 offset:1296 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v15, v109, 16, v95
; ALIGNED-NEXT: buffer_load_ubyte v95, v2, s[0:3], 0 offen offset:1
-; ALIGNED-NEXT: v_lshl_or_b32 v109, v0, 8, v1
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: s_waitcnt vmcnt(4)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1260 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
+; ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1300 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(2)
; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1340 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
+; ALIGNED-NEXT: s_waitcnt vmcnt(1)
+; ALIGNED-NEXT: v_lshl_or_b32 v109, v0, 8, v1
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1292 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:14
+; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: buffer_store_dword v95, off, s[0:3], s32 offset:1284 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v95, v95, 8, v3
; ALIGNED-NEXT: v_lshl_or_b32 v0, v109, 16, v95
; ALIGNED-NEXT: v_lshl_or_b32 v95, v5, 8, v125
; ALIGNED-NEXT: v_lshl_or_b32 v109, v4, 8, v6
+; ALIGNED-NEXT: s_clause 0x1
+; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:12
+; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:13
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1312 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v109, 16, v95
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1348 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x4
-; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:12
-; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:13
-; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:14
+; ALIGNED-NEXT: s_clause 0x1
; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:15
; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:11
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; ALIGNED-NEXT: buffer_store_dword v127, off, s[0:3], s32 offset:1412 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(4)
+; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1364 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(2)
; ALIGNED-NEXT: v_lshl_or_b32 v95, v4, 8, v6
; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1376 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
+; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:9
+; ALIGNED-NEXT: s_waitcnt vmcnt(2)
; ALIGNED-NEXT: v_lshl_or_b32 v109, v0, 8, v1
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1368 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1364 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:9
; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:10
; ALIGNED-NEXT: v_lshl_or_b32 v0, v109, 16, v95
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1392 ; 4-byte Folded Spill
@@ -13509,19 +13410,19 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: v_lshl_or_b32 v95, v4, 8, v0
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1380 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v109, 16, v95
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_clause 0x2
; ALIGNED-NEXT: buffer_load_ubyte v109, v2, s[0:3], 0 offen offset:18
; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:16
; ALIGNED-NEXT: buffer_load_ubyte v95, v2, s[0:3], 0 offen offset:17
+; ALIGNED-NEXT: v_add_nc_u32_e32 v2, 0x100, v2
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:232
; ALIGNED-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:236
; ALIGNED-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:228
; ALIGNED-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:224
-; ALIGNED-NEXT: s_clause 0x1
+; ALIGNED-NEXT: s_clause 0x1 ; 8-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704
; ALIGNED-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:708
-; ALIGNED-NEXT: v_add_nc_u32_e32 v2, 0x100, v2
; ALIGNED-NEXT: s_waitcnt vmcnt(4)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v127, 8, v109
; ALIGNED-NEXT: s_waitcnt vmcnt(2)
@@ -13590,6 +13491,8 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_store_dword v91, off, s[0:3], s32 offset:220
; ALIGNED-NEXT: buffer_store_dword v93, off, s[0:3], s32 offset:212
; ALIGNED-NEXT: buffer_store_dword v110, off, s[0:3], s32 offset:208
+; ALIGNED-NEXT: v_lshl_or_b32 v127, v0, 16, v127
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1492 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v87 offset:202
; ALIGNED-NEXT: flat_store_byte v[3:4], v97 offset:203
; ALIGNED-NEXT: flat_store_byte v[3:4], v96 offset:201
@@ -13606,8 +13509,6 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: flat_store_byte v[3:4], v116 offset:198
; ALIGNED-NEXT: flat_store_byte v[3:4], v118 offset:196
; ALIGNED-NEXT: flat_store_byte v[3:4], v40 offset:192
-; ALIGNED-NEXT: v_lshl_or_b32 v127, v0, 16, v127
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1492 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_add_u32 s4, s4, 0x100
; ALIGNED-NEXT: s_addc_u32 s5, s5, 0
; ALIGNED-NEXT: s_cmp_lg_u64 s[4:5], 0x800
@@ -13622,6 +13523,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v42 offset:186
; ALIGNED-NEXT: flat_store_byte v[3:4], v44 offset:187
; ALIGNED-NEXT: flat_store_byte v[3:4], v43 offset:185
@@ -13638,7 +13540,6 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: flat_store_byte v[3:4], v62 offset:182
; ALIGNED-NEXT: flat_store_byte v[3:4], v73 offset:180
; ALIGNED-NEXT: flat_store_byte v[3:4], v74 offset:176
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Reload
@@ -13650,6 +13551,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v75 offset:170
; ALIGNED-NEXT: flat_store_byte v[3:4], v78 offset:171
; ALIGNED-NEXT: flat_store_byte v[3:4], v76 offset:169
@@ -13666,7 +13568,6 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: flat_store_byte v[3:4], v107 offset:166
; ALIGNED-NEXT: flat_store_byte v[3:4], v111 offset:164
; ALIGNED-NEXT: flat_store_byte v[3:4], v120 offset:160
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Reload
@@ -13678,11 +13579,11 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v121 offset:154
; ALIGNED-NEXT: flat_store_byte v[3:4], v124 offset:155
; ALIGNED-NEXT: flat_store_byte v[3:4], v122 offset:153
; ALIGNED-NEXT: flat_store_byte v[3:4], v126 offset:159
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:157
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1452 ; 4-byte Folded Reload
@@ -14200,9 +14101,9 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1312 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1376 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[3:4], v1 offset:10
; ALIGNED-NEXT: flat_store_byte v[3:4], v5 offset:11
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1376 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:13
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1388 ; 4-byte Folded Reload
@@ -14253,23 +14154,23 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: s_mov_b32 s7, -1
; ALIGNED-NEXT: .LBB9_4: ; %memmove_bwd_loop
; ALIGNED-NEXT: ; =>This Inner Loop Header: Depth=1
-; ALIGNED-NEXT: s_clause 0x39
+; ALIGNED-NEXT: s_clause 0x3e
; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:20
; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:21
; ALIGNED-NEXT: buffer_load_ubyte v2, v4, s[0:3], 0 offen offset:22
; ALIGNED-NEXT: buffer_load_ubyte v3, v4, s[0:3], 0 offen offset:23
-; ALIGNED-NEXT: buffer_load_ubyte v6, v4, s[0:3], 0 offen offset:24
-; ALIGNED-NEXT: buffer_load_ubyte v10, v4, s[0:3], 0 offen offset:25
-; ALIGNED-NEXT: buffer_load_ubyte v12, v4, s[0:3], 0 offen offset:26
; ALIGNED-NEXT: buffer_load_ubyte v125, v4, s[0:3], 0 offen offset:19
; ALIGNED-NEXT: buffer_load_ubyte v5, v4, s[0:3], 0 offen offset:28
; ALIGNED-NEXT: buffer_load_ubyte v7, v4, s[0:3], 0 offen offset:29
; ALIGNED-NEXT: buffer_load_ubyte v8, v4, s[0:3], 0 offen offset:30
; ALIGNED-NEXT: buffer_load_ubyte v9, v4, s[0:3], 0 offen offset:31
+; ALIGNED-NEXT: buffer_load_ubyte v6, v4, s[0:3], 0 offen offset:24
+; ALIGNED-NEXT: buffer_load_ubyte v10, v4, s[0:3], 0 offen offset:25
+; ALIGNED-NEXT: buffer_load_ubyte v12, v4, s[0:3], 0 offen offset:26
+; ALIGNED-NEXT: buffer_load_ubyte v11, v4, s[0:3], 0 offen offset:27
; ALIGNED-NEXT: buffer_load_ubyte v14, v4, s[0:3], 0 offen offset:32
; ALIGNED-NEXT: buffer_load_ubyte v15, v4, s[0:3], 0 offen offset:33
; ALIGNED-NEXT: buffer_load_ubyte v17, v4, s[0:3], 0 offen offset:34
-; ALIGNED-NEXT: buffer_load_ubyte v11, v4, s[0:3], 0 offen offset:27
; ALIGNED-NEXT: buffer_load_ubyte v19, v4, s[0:3], 0 offen offset:35
; ALIGNED-NEXT: buffer_load_ubyte v13, v4, s[0:3], 0 offen offset:36
; ALIGNED-NEXT: buffer_load_ubyte v16, v4, s[0:3], 0 offen offset:37
@@ -14291,17 +14192,17 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_ubyte v37, v4, s[0:3], 0 offen offset:53
; ALIGNED-NEXT: buffer_load_ubyte v35, v4, s[0:3], 0 offen offset:54
; ALIGNED-NEXT: buffer_load_ubyte v36, v4, s[0:3], 0 offen offset:55
-; ALIGNED-NEXT: buffer_load_ubyte v48, v4, s[0:3], 0 offen offset:56
-; ALIGNED-NEXT: buffer_load_ubyte v51, v4, s[0:3], 0 offen offset:57
-; ALIGNED-NEXT: buffer_load_ubyte v52, v4, s[0:3], 0 offen offset:58
; ALIGNED-NEXT: buffer_load_ubyte v38, v4, s[0:3], 0 offen offset:60
; ALIGNED-NEXT: buffer_load_ubyte v50, v4, s[0:3], 0 offen offset:61
; ALIGNED-NEXT: buffer_load_ubyte v39, v4, s[0:3], 0 offen offset:62
; ALIGNED-NEXT: buffer_load_ubyte v49, v4, s[0:3], 0 offen offset:63
+; ALIGNED-NEXT: buffer_load_ubyte v48, v4, s[0:3], 0 offen offset:56
+; ALIGNED-NEXT: buffer_load_ubyte v51, v4, s[0:3], 0 offen offset:57
+; ALIGNED-NEXT: buffer_load_ubyte v52, v4, s[0:3], 0 offen offset:58
+; ALIGNED-NEXT: buffer_load_ubyte v53, v4, s[0:3], 0 offen offset:59
; ALIGNED-NEXT: buffer_load_ubyte v29, v4, s[0:3], 0 offen offset:64
; ALIGNED-NEXT: buffer_load_ubyte v55, v4, s[0:3], 0 offen offset:65
; ALIGNED-NEXT: buffer_load_ubyte v66, v4, s[0:3], 0 offen offset:66
-; ALIGNED-NEXT: buffer_load_ubyte v53, v4, s[0:3], 0 offen offset:59
; ALIGNED-NEXT: buffer_load_ubyte v67, v4, s[0:3], 0 offen offset:67
; ALIGNED-NEXT: buffer_load_ubyte v54, v4, s[0:3], 0 offen offset:68
; ALIGNED-NEXT: buffer_load_ubyte v64, v4, s[0:3], 0 offen offset:69
@@ -14309,57 +14210,97 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_ubyte v68, v4, s[0:3], 0 offen offset:71
; ALIGNED-NEXT: buffer_load_ubyte v69, v4, s[0:3], 0 offen offset:76
; ALIGNED-NEXT: buffer_load_ubyte v70, v4, s[0:3], 0 offen offset:77
-; ALIGNED-NEXT: buffer_load_ubyte v81, v4, s[0:3], 0 offen offset:75
; ALIGNED-NEXT: buffer_load_ubyte v71, v4, s[0:3], 0 offen offset:78
; ALIGNED-NEXT: buffer_load_ubyte v80, v4, s[0:3], 0 offen offset:79
-; ALIGNED-NEXT: s_waitcnt vmcnt(57)
+; ALIGNED-NEXT: buffer_load_ubyte v81, v4, s[0:3], 0 offen offset:75
+; ALIGNED-NEXT: buffer_load_ubyte v126, v4, s[0:3], 0 offen offset:159
+; ALIGNED-NEXT: buffer_load_ubyte v124, v4, s[0:3], 0 offen offset:155
+; ALIGNED-NEXT: buffer_load_ubyte v123, v4, s[0:3], 0 offen offset:152
+; ALIGNED-NEXT: buffer_load_ubyte v121, v4, s[0:3], 0 offen offset:153
+; ALIGNED-NEXT: buffer_load_ubyte v111, v4, s[0:3], 0 offen offset:154
+; ALIGNED-NEXT: s_clause 0x34
+; ALIGNED-NEXT: buffer_load_ubyte v108, v4, s[0:3], 0 offen offset:160
+; ALIGNED-NEXT: buffer_load_ubyte v105, v4, s[0:3], 0 offen offset:161
+; ALIGNED-NEXT: buffer_load_ubyte v93, v4, s[0:3], 0 offen offset:162
+; ALIGNED-NEXT: buffer_load_ubyte v92, v4, s[0:3], 0 offen offset:163
+; ALIGNED-NEXT: buffer_load_ubyte v107, v4, s[0:3], 0 offen offset:164
+; ALIGNED-NEXT: buffer_load_ubyte v95, v4, s[0:3], 0 offen offset:165
+; ALIGNED-NEXT: buffer_load_ubyte v94, v4, s[0:3], 0 offen offset:166
+; ALIGNED-NEXT: buffer_load_ubyte v91, v4, s[0:3], 0 offen offset:167
+; ALIGNED-NEXT: buffer_load_ubyte v89, v4, s[0:3], 0 offen offset:172
+; ALIGNED-NEXT: buffer_load_ubyte v79, v4, s[0:3], 0 offen offset:173
+; ALIGNED-NEXT: buffer_load_ubyte v78, v4, s[0:3], 0 offen offset:174
+; ALIGNED-NEXT: buffer_load_ubyte v77, v4, s[0:3], 0 offen offset:175
+; ALIGNED-NEXT: buffer_load_ubyte v75, v4, s[0:3], 0 offen offset:171
+; ALIGNED-NEXT: buffer_load_ubyte v74, v4, s[0:3], 0 offen offset:168
+; ALIGNED-NEXT: buffer_load_ubyte v72, v4, s[0:3], 0 offen offset:169
+; ALIGNED-NEXT: buffer_load_ubyte v63, v4, s[0:3], 0 offen offset:170
+; ALIGNED-NEXT: buffer_load_ubyte v61, v4, s[0:3], 0 offen offset:176
+; ALIGNED-NEXT: buffer_load_ubyte v59, v4, s[0:3], 0 offen offset:177
+; ALIGNED-NEXT: buffer_load_ubyte v47, v4, s[0:3], 0 offen offset:178
+; ALIGNED-NEXT: buffer_load_ubyte v56, v4, s[0:3], 0 offen offset:179
+; ALIGNED-NEXT: buffer_load_ubyte v60, v4, s[0:3], 0 offen offset:180
+; ALIGNED-NEXT: buffer_load_ubyte v57, v4, s[0:3], 0 offen offset:181
+; ALIGNED-NEXT: buffer_load_ubyte v58, v4, s[0:3], 0 offen offset:182
+; ALIGNED-NEXT: buffer_load_ubyte v46, v4, s[0:3], 0 offen offset:183
+; ALIGNED-NEXT: buffer_load_ubyte v44, v4, s[0:3], 0 offen offset:188
+; ALIGNED-NEXT: buffer_load_ubyte v43, v4, s[0:3], 0 offen offset:189
+; ALIGNED-NEXT: buffer_load_ubyte v42, v4, s[0:3], 0 offen offset:190
+; ALIGNED-NEXT: buffer_load_ubyte v41, v4, s[0:3], 0 offen offset:191
+; ALIGNED-NEXT: buffer_load_ubyte v40, v4, s[0:3], 0 offen offset:187
+; ALIGNED-NEXT: buffer_load_ubyte v119, v4, s[0:3], 0 offen offset:184
+; ALIGNED-NEXT: buffer_load_ubyte v118, v4, s[0:3], 0 offen offset:185
+; ALIGNED-NEXT: buffer_load_ubyte v117, v4, s[0:3], 0 offen offset:186
+; ALIGNED-NEXT: buffer_load_ubyte v115, v4, s[0:3], 0 offen offset:192
+; ALIGNED-NEXT: buffer_load_ubyte v112, v4, s[0:3], 0 offen offset:193
+; ALIGNED-NEXT: buffer_load_ubyte v101, v4, s[0:3], 0 offen offset:194
+; ALIGNED-NEXT: buffer_load_ubyte v100, v4, s[0:3], 0 offen offset:195
+; ALIGNED-NEXT: buffer_load_ubyte v113, v4, s[0:3], 0 offen offset:196
+; ALIGNED-NEXT: buffer_load_ubyte v103, v4, s[0:3], 0 offen offset:197
+; ALIGNED-NEXT: buffer_load_ubyte v102, v4, s[0:3], 0 offen offset:198
+; ALIGNED-NEXT: buffer_load_ubyte v99, v4, s[0:3], 0 offen offset:199
+; ALIGNED-NEXT: buffer_load_ubyte v97, v4, s[0:3], 0 offen offset:204
+; ALIGNED-NEXT: buffer_load_ubyte v87, v4, s[0:3], 0 offen offset:205
+; ALIGNED-NEXT: buffer_load_ubyte v96, v4, s[0:3], 0 offen offset:206
+; ALIGNED-NEXT: buffer_load_ubyte v86, v4, s[0:3], 0 offen offset:207
+; ALIGNED-NEXT: buffer_load_ubyte v85, v4, s[0:3], 0 offen offset:203
+; ALIGNED-NEXT: buffer_load_ubyte v84, v4, s[0:3], 0 offen offset:200
+; ALIGNED-NEXT: buffer_load_ubyte v83, v4, s[0:3], 0 offen offset:201
+; ALIGNED-NEXT: buffer_load_ubyte v82, v4, s[0:3], 0 offen offset:202
+; ALIGNED-NEXT: buffer_load_ubyte v120, v4, s[0:3], 0 offen offset:2
+; ALIGNED-NEXT: buffer_load_ubyte v104, v4, s[0:3], 0 offen offset:4
+; ALIGNED-NEXT: buffer_load_ubyte v109, v4, s[0:3], 0 offen offset:5
+; ALIGNED-NEXT: buffer_load_ubyte v110, v4, s[0:3], 0 offen offset:6
+; ALIGNED-NEXT: buffer_load_ubyte v122, v4, s[0:3], 0 offen offset:7
+; ALIGNED-NEXT: s_waitcnt vmcnt(62)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(56)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(55)
; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(54)
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(53)
-; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(52)
-; ALIGNED-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(51)
-; ALIGNED-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(49)
; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(48)
; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(47)
; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(46)
; ALIGNED-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(45)
+; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 8, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v7, 8, v5
-; ALIGNED-NEXT: s_waitcnt vmcnt(42)
-; ALIGNED-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v3, v9, 8, v8
-; ALIGNED-NEXT: s_waitcnt vmcnt(40)
; ALIGNED-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v5, v10, 8, v6
; ALIGNED-NEXT: v_lshl_or_b32 v6, v11, 8, v12
; ALIGNED-NEXT: v_lshl_or_b32 v7, v15, 8, v14
; ALIGNED-NEXT: v_lshl_or_b32 v8, v19, 8, v17
-; ALIGNED-NEXT: s_waitcnt vmcnt(39)
; ALIGNED-NEXT: v_lshl_or_b32 v9, v16, 8, v13
-; ALIGNED-NEXT: s_waitcnt vmcnt(37)
; ALIGNED-NEXT: v_lshl_or_b32 v10, v20, 8, v18
-; ALIGNED-NEXT: s_waitcnt vmcnt(35)
; ALIGNED-NEXT: v_lshl_or_b32 v11, v23, 8, v22
-; ALIGNED-NEXT: s_waitcnt vmcnt(33)
; ALIGNED-NEXT: v_lshl_or_b32 v12, v28, 8, v25
-; ALIGNED-NEXT: s_waitcnt vmcnt(31)
; ALIGNED-NEXT: v_lshl_or_b32 v13, v24, 8, v21
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(29)
; ALIGNED-NEXT: v_lshl_or_b32 v14, v27, 8, v26
; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v6, 16, v5
@@ -14368,75 +14309,88 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: v_lshl_or_b32 v6, v12, 16, v11
; ALIGNED-NEXT: v_lshl_or_b32 v7, v14, 16, v13
; ALIGNED-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(27)
; ALIGNED-NEXT: v_lshl_or_b32 v15, v31, 8, v30
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(25)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v34, 8, v33
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(23)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v37, 8, v32
; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(21)
; ALIGNED-NEXT: v_lshl_or_b32 v2, v36, 8, v35
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(16)
; ALIGNED-NEXT: v_lshl_or_b32 v3, v50, 8, v38
; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(14)
; ALIGNED-NEXT: v_lshl_or_b32 v5, v49, 8, v39
; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v6, v51, 8, v48
; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(10)
; ALIGNED-NEXT: v_lshl_or_b32 v7, v53, 8, v52
; ALIGNED-NEXT: v_lshl_or_b32 v0, v0, 16, v15
; ALIGNED-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; ALIGNED-NEXT: v_lshl_or_b32 v2, v5, 16, v3
+; ALIGNED-NEXT: s_clause 0x1
; ALIGNED-NEXT: buffer_load_ubyte v5, v4, s[0:3], 0 offen offset:85
+; ALIGNED-NEXT: buffer_load_ubyte v8, v4, s[0:3], 0 offen offset:87
; ALIGNED-NEXT: v_lshl_or_b32 v3, v7, 16, v6
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v55, 8, v29
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(10)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v67, 8, v66
; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(8)
; ALIGNED-NEXT: v_lshl_or_b32 v2, v64, 8, v54
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
; ALIGNED-NEXT: v_lshl_or_b32 v3, v68, 8, v65
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v6, v4, s[0:3], 0 offen offset:86
; ALIGNED-NEXT: buffer_load_ubyte v7, v4, s[0:3], 0 offen offset:82
-; ALIGNED-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_load_ubyte v6, v4, s[0:3], 0 offen offset:86
+; ALIGNED-NEXT: buffer_store_dword v66, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
+; ALIGNED-NEXT: s_waitcnt vmcnt(62)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v70, 8, v69
; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v3, v4, s[0:3], 0 offen offset:83
; ALIGNED-NEXT: buffer_load_ubyte v2, v4, s[0:3], 0 offen offset:74
+; ALIGNED-NEXT: buffer_load_ubyte v3, v4, s[0:3], 0 offen offset:83
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:988 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(5)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v80, 8, v71
-; ALIGNED-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v65, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v68, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v70, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v80, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: s_clause 0x5
; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:73
-; ALIGNED-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_load_ubyte v80, v4, s[0:3], 0 offen offset:212
+; ALIGNED-NEXT: buffer_load_ubyte v68, v4, s[0:3], 0 offen offset:213
+; ALIGNED-NEXT: buffer_load_ubyte v70, v4, s[0:3], 0 offen offset:214
+; ALIGNED-NEXT: buffer_load_ubyte v65, v4, s[0:3], 0 offen offset:215
+; ALIGNED-NEXT: buffer_load_ubyte v66, v4, s[0:3], 0 offen offset:211
+; ALIGNED-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:992 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:72
-; ALIGNED-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v67, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v69, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v71, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: buffer_load_ubyte v71, v4, s[0:3], 0 offen offset:216
+; ALIGNED-NEXT: buffer_load_ubyte v67, v4, s[0:3], 0 offen offset:217
+; ALIGNED-NEXT: buffer_load_ubyte v53, v4, s[0:3], 0 offen offset:218
+; ALIGNED-NEXT: buffer_load_ubyte v52, v4, s[0:3], 0 offen offset:219
+; ALIGNED-NEXT: buffer_load_ubyte v69, v4, s[0:3], 0 offen offset:220
+; ALIGNED-NEXT: buffer_load_ubyte v55, v4, s[0:3], 0 offen offset:221
+; ALIGNED-NEXT: buffer_load_ubyte v54, v4, s[0:3], 0 offen offset:222
+; ALIGNED-NEXT: buffer_load_ubyte v51, v4, s[0:3], 0 offen offset:223
+; ALIGNED-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x2
+; ALIGNED-NEXT: buffer_load_ubyte v50, v4, s[0:3], 0 offen offset:208
+; ALIGNED-NEXT: buffer_load_ubyte v38, v4, s[0:3], 0 offen offset:209
+; ALIGNED-NEXT: buffer_load_ubyte v39, v4, s[0:3], 0 offen offset:210
; ALIGNED-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill
@@ -14445,52 +14399,83 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: buffer_load_ubyte v37, v4, s[0:3], 0 offen offset:224
+; ALIGNED-NEXT: buffer_load_ubyte v35, v4, s[0:3], 0 offen offset:225
+; ALIGNED-NEXT: buffer_load_ubyte v31, v4, s[0:3], 0 offen offset:226
+; ALIGNED-NEXT: buffer_load_ubyte v32, v4, s[0:3], 0 offen offset:227
+; ALIGNED-NEXT: buffer_load_ubyte v36, v4, s[0:3], 0 offen offset:228
+; ALIGNED-NEXT: buffer_load_ubyte v33, v4, s[0:3], 0 offen offset:229
+; ALIGNED-NEXT: buffer_load_ubyte v34, v4, s[0:3], 0 offen offset:230
+; ALIGNED-NEXT: buffer_load_ubyte v30, v4, s[0:3], 0 offen offset:231
+; ALIGNED-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v66, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v67, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v64, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v65, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v68, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v69, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v70, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v71, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v80, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_store_dword v125, off, s[0:3], s32 offset:1452 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x4
+; ALIGNED-NEXT: buffer_load_ubyte v29, v4, s[0:3], 0 offen offset:236
+; ALIGNED-NEXT: buffer_load_ubyte v27, v4, s[0:3], 0 offen offset:237
+; ALIGNED-NEXT: buffer_load_ubyte v28, v4, s[0:3], 0 offen offset:238
+; ALIGNED-NEXT: buffer_load_ubyte v26, v4, s[0:3], 0 offen offset:239
+; ALIGNED-NEXT: buffer_load_ubyte v23, v4, s[0:3], 0 offen offset:235
+; ALIGNED-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x2
+; ALIGNED-NEXT: buffer_load_ubyte v24, v4, s[0:3], 0 offen offset:232
+; ALIGNED-NEXT: buffer_load_ubyte v22, v4, s[0:3], 0 offen offset:233
+; ALIGNED-NEXT: buffer_load_ubyte v21, v4, s[0:3], 0 offen offset:234
+; ALIGNED-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_clause 0x7
+; ALIGNED-NEXT: buffer_load_ubyte v19, v4, s[0:3], 0 offen offset:240
+; ALIGNED-NEXT: buffer_load_ubyte v17, v4, s[0:3], 0 offen offset:241
+; ALIGNED-NEXT: buffer_load_ubyte v13, v4, s[0:3], 0 offen offset:242
+; ALIGNED-NEXT: buffer_load_ubyte v14, v4, s[0:3], 0 offen offset:243
+; ALIGNED-NEXT: buffer_load_ubyte v18, v4, s[0:3], 0 offen offset:244
+; ALIGNED-NEXT: buffer_load_ubyte v15, v4, s[0:3], 0 offen offset:245
+; ALIGNED-NEXT: buffer_load_ubyte v16, v4, s[0:3], 0 offen offset:246
+; ALIGNED-NEXT: buffer_load_ubyte v12, v4, s[0:3], 0 offen offset:247
; ALIGNED-NEXT: buffer_store_dword v81, off, s[0:3], s32 offset:1000 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_load_ubyte v8, v4, s[0:3], 0 offen offset:87
-; ALIGNED-NEXT: s_waitcnt vmcnt(7)
+; ALIGNED-NEXT: buffer_store_dword v64, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(51)
+; ALIGNED-NEXT: buffer_store_dword v104, off, s[0:3], s32 offset:1364 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(50)
+; ALIGNED-NEXT: buffer_store_dword v109, off, s[0:3], s32 offset:1380 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(49)
+; ALIGNED-NEXT: buffer_store_dword v110, off, s[0:3], s32 offset:1384 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(48)
+; ALIGNED-NEXT: buffer_store_dword v122, off, s[0:3], s32 offset:1392 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(47)
; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1032 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
+; ALIGNED-NEXT: s_waitcnt vmcnt(46)
+; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:1040 ; 4-byte Folded Spill
+; ALIGNED-NEXT: s_waitcnt vmcnt(44)
; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1036 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(5)
-; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:1028 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1020 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
+; ALIGNED-NEXT: s_waitcnt vmcnt(43)
; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:1004 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
+; ALIGNED-NEXT: s_waitcnt vmcnt(41)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:996 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
+; ALIGNED-NEXT: s_waitcnt vmcnt(35)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:984 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v81, 8, v2
; ALIGNED-NEXT: buffer_load_ubyte v2, v4, s[0:3], 0 offen offset:84
+; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1020 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:1028 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:81
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1008 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:80
; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:1040 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v62, v12, 8, v16
; ALIGNED-NEXT: s_waitcnt vmcnt(2)
; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:1024 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
@@ -14763,8 +14748,15 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1332 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 8, v7
+; ALIGNED-NEXT: v_lshl_or_b32 v3, v100, 8, v101
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; ALIGNED-NEXT: v_lshl_or_b32 v1, v8, 8, v6
+; ALIGNED-NEXT: s_clause 0x4
+; ALIGNED-NEXT: buffer_load_ubyte v11, v4, s[0:3], 0 offen offset:252
+; ALIGNED-NEXT: buffer_load_ubyte v9, v4, s[0:3], 0 offen offset:253
+; ALIGNED-NEXT: buffer_load_ubyte v10, v4, s[0:3], 0 offen offset:254
+; ALIGNED-NEXT: buffer_load_ubyte v8, v4, s[0:3], 0 offen offset:255
+; ALIGNED-NEXT: buffer_load_ubyte v7, v4, s[0:3], 0 offen offset:251
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v5, 8, v2
; ALIGNED-NEXT: buffer_load_ubyte v2, v4, s[0:3], 0 offen offset:158
@@ -14772,250 +14764,110 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:157
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1408 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:156
+; ALIGNED-NEXT: s_waitcnt vmcnt(4)
+; ALIGNED-NEXT: v_lshl_or_b32 v76, v8, 8, v10
; ALIGNED-NEXT: s_waitcnt vmcnt(2)
; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:1420 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1416 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1404 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x1
-; ALIGNED-NEXT: buffer_load_ubyte v126, v4, s[0:3], 0 offen offset:159
-; ALIGNED-NEXT: buffer_load_ubyte v124, v4, s[0:3], 0 offen offset:155
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v126, 8, v2
+; ALIGNED-NEXT: v_lshl_or_b32 v2, v112, 8, v115
; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v123, v4, s[0:3], 0 offen offset:152
-; ALIGNED-NEXT: buffer_load_ubyte v121, v4, s[0:3], 0 offen offset:153
-; ALIGNED-NEXT: buffer_load_ubyte v111, v4, s[0:3], 0 offen offset:154
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v121, 8, v123
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: v_lshl_or_b32 v1, v124, 8, v111
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x7
-; ALIGNED-NEXT: buffer_load_ubyte v108, v4, s[0:3], 0 offen offset:160
-; ALIGNED-NEXT: buffer_load_ubyte v105, v4, s[0:3], 0 offen offset:161
-; ALIGNED-NEXT: buffer_load_ubyte v93, v4, s[0:3], 0 offen offset:162
-; ALIGNED-NEXT: buffer_load_ubyte v92, v4, s[0:3], 0 offen offset:163
-; ALIGNED-NEXT: buffer_load_ubyte v107, v4, s[0:3], 0 offen offset:164
-; ALIGNED-NEXT: buffer_load_ubyte v95, v4, s[0:3], 0 offen offset:165
-; ALIGNED-NEXT: buffer_load_ubyte v94, v4, s[0:3], 0 offen offset:166
-; ALIGNED-NEXT: buffer_load_ubyte v91, v4, s[0:3], 0 offen offset:167
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v105, 8, v108
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v92, 8, v93
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v91, 8, v94
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v95, 8, v107
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x4
-; ALIGNED-NEXT: buffer_load_ubyte v89, v4, s[0:3], 0 offen offset:172
-; ALIGNED-NEXT: buffer_load_ubyte v79, v4, s[0:3], 0 offen offset:173
-; ALIGNED-NEXT: buffer_load_ubyte v78, v4, s[0:3], 0 offen offset:174
-; ALIGNED-NEXT: buffer_load_ubyte v77, v4, s[0:3], 0 offen offset:175
-; ALIGNED-NEXT: buffer_load_ubyte v75, v4, s[0:3], 0 offen offset:171
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v79, 8, v89
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v77, 8, v78
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v74, v4, s[0:3], 0 offen offset:168
-; ALIGNED-NEXT: buffer_load_ubyte v72, v4, s[0:3], 0 offen offset:169
-; ALIGNED-NEXT: buffer_load_ubyte v63, v4, s[0:3], 0 offen offset:170
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v72, 8, v74
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v75, 8, v63
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x7
-; ALIGNED-NEXT: buffer_load_ubyte v61, v4, s[0:3], 0 offen offset:176
-; ALIGNED-NEXT: buffer_load_ubyte v59, v4, s[0:3], 0 offen offset:177
-; ALIGNED-NEXT: buffer_load_ubyte v47, v4, s[0:3], 0 offen offset:178
-; ALIGNED-NEXT: buffer_load_ubyte v56, v4, s[0:3], 0 offen offset:179
-; ALIGNED-NEXT: buffer_load_ubyte v60, v4, s[0:3], 0 offen offset:180
-; ALIGNED-NEXT: buffer_load_ubyte v57, v4, s[0:3], 0 offen offset:181
-; ALIGNED-NEXT: buffer_load_ubyte v58, v4, s[0:3], 0 offen offset:182
-; ALIGNED-NEXT: buffer_load_ubyte v46, v4, s[0:3], 0 offen offset:183
-; ALIGNED-NEXT: s_waitcnt vmcnt(6)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v59, 8, v61
-; ALIGNED-NEXT: s_waitcnt vmcnt(4)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v56, 8, v47
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v46, 8, v58
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v57, 8, v60
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x4
-; ALIGNED-NEXT: buffer_load_ubyte v44, v4, s[0:3], 0 offen offset:188
-; ALIGNED-NEXT: buffer_load_ubyte v43, v4, s[0:3], 0 offen offset:189
-; ALIGNED-NEXT: buffer_load_ubyte v42, v4, s[0:3], 0 offen offset:190
-; ALIGNED-NEXT: buffer_load_ubyte v41, v4, s[0:3], 0 offen offset:191
-; ALIGNED-NEXT: buffer_load_ubyte v40, v4, s[0:3], 0 offen offset:187
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v44
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v41, 8, v42
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x2
-; ALIGNED-NEXT: buffer_load_ubyte v119, v4, s[0:3], 0 offen offset:184
-; ALIGNED-NEXT: buffer_load_ubyte v118, v4, s[0:3], 0 offen offset:185
-; ALIGNED-NEXT: buffer_load_ubyte v117, v4, s[0:3], 0 offen offset:186
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v118, 8, v119
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: v_lshl_or_b32 v1, v40, 8, v117
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x3e
-; ALIGNED-NEXT: buffer_load_ubyte v115, v4, s[0:3], 0 offen offset:192
-; ALIGNED-NEXT: buffer_load_ubyte v112, v4, s[0:3], 0 offen offset:193
-; ALIGNED-NEXT: buffer_load_ubyte v101, v4, s[0:3], 0 offen offset:194
-; ALIGNED-NEXT: buffer_load_ubyte v100, v4, s[0:3], 0 offen offset:195
-; ALIGNED-NEXT: buffer_load_ubyte v113, v4, s[0:3], 0 offen offset:196
-; ALIGNED-NEXT: buffer_load_ubyte v103, v4, s[0:3], 0 offen offset:197
-; ALIGNED-NEXT: buffer_load_ubyte v102, v4, s[0:3], 0 offen offset:198
-; ALIGNED-NEXT: buffer_load_ubyte v99, v4, s[0:3], 0 offen offset:199
-; ALIGNED-NEXT: buffer_load_ubyte v97, v4, s[0:3], 0 offen offset:204
-; ALIGNED-NEXT: buffer_load_ubyte v87, v4, s[0:3], 0 offen offset:205
-; ALIGNED-NEXT: buffer_load_ubyte v96, v4, s[0:3], 0 offen offset:206
-; ALIGNED-NEXT: buffer_load_ubyte v86, v4, s[0:3], 0 offen offset:207
-; ALIGNED-NEXT: buffer_load_ubyte v85, v4, s[0:3], 0 offen offset:203
-; ALIGNED-NEXT: buffer_load_ubyte v84, v4, s[0:3], 0 offen offset:200
-; ALIGNED-NEXT: buffer_load_ubyte v83, v4, s[0:3], 0 offen offset:201
-; ALIGNED-NEXT: buffer_load_ubyte v82, v4, s[0:3], 0 offen offset:202
-; ALIGNED-NEXT: buffer_load_ubyte v80, v4, s[0:3], 0 offen offset:212
-; ALIGNED-NEXT: buffer_load_ubyte v68, v4, s[0:3], 0 offen offset:213
-; ALIGNED-NEXT: buffer_load_ubyte v70, v4, s[0:3], 0 offen offset:214
-; ALIGNED-NEXT: buffer_load_ubyte v65, v4, s[0:3], 0 offen offset:215
-; ALIGNED-NEXT: buffer_load_ubyte v66, v4, s[0:3], 0 offen offset:211
-; ALIGNED-NEXT: buffer_load_ubyte v71, v4, s[0:3], 0 offen offset:216
-; ALIGNED-NEXT: buffer_load_ubyte v67, v4, s[0:3], 0 offen offset:217
-; ALIGNED-NEXT: buffer_load_ubyte v53, v4, s[0:3], 0 offen offset:218
-; ALIGNED-NEXT: buffer_load_ubyte v52, v4, s[0:3], 0 offen offset:219
-; ALIGNED-NEXT: buffer_load_ubyte v69, v4, s[0:3], 0 offen offset:220
-; ALIGNED-NEXT: buffer_load_ubyte v55, v4, s[0:3], 0 offen offset:221
-; ALIGNED-NEXT: buffer_load_ubyte v54, v4, s[0:3], 0 offen offset:222
-; ALIGNED-NEXT: buffer_load_ubyte v51, v4, s[0:3], 0 offen offset:223
-; ALIGNED-NEXT: buffer_load_ubyte v50, v4, s[0:3], 0 offen offset:208
-; ALIGNED-NEXT: buffer_load_ubyte v38, v4, s[0:3], 0 offen offset:209
-; ALIGNED-NEXT: buffer_load_ubyte v39, v4, s[0:3], 0 offen offset:210
-; ALIGNED-NEXT: buffer_load_ubyte v37, v4, s[0:3], 0 offen offset:224
-; ALIGNED-NEXT: buffer_load_ubyte v35, v4, s[0:3], 0 offen offset:225
-; ALIGNED-NEXT: buffer_load_ubyte v31, v4, s[0:3], 0 offen offset:226
-; ALIGNED-NEXT: buffer_load_ubyte v32, v4, s[0:3], 0 offen offset:227
-; ALIGNED-NEXT: buffer_load_ubyte v36, v4, s[0:3], 0 offen offset:228
-; ALIGNED-NEXT: buffer_load_ubyte v33, v4, s[0:3], 0 offen offset:229
-; ALIGNED-NEXT: buffer_load_ubyte v34, v4, s[0:3], 0 offen offset:230
-; ALIGNED-NEXT: buffer_load_ubyte v30, v4, s[0:3], 0 offen offset:231
-; ALIGNED-NEXT: buffer_load_ubyte v29, v4, s[0:3], 0 offen offset:236
-; ALIGNED-NEXT: buffer_load_ubyte v27, v4, s[0:3], 0 offen offset:237
-; ALIGNED-NEXT: buffer_load_ubyte v28, v4, s[0:3], 0 offen offset:238
-; ALIGNED-NEXT: buffer_load_ubyte v26, v4, s[0:3], 0 offen offset:239
-; ALIGNED-NEXT: buffer_load_ubyte v23, v4, s[0:3], 0 offen offset:235
-; ALIGNED-NEXT: buffer_load_ubyte v24, v4, s[0:3], 0 offen offset:232
-; ALIGNED-NEXT: buffer_load_ubyte v22, v4, s[0:3], 0 offen offset:233
-; ALIGNED-NEXT: buffer_load_ubyte v21, v4, s[0:3], 0 offen offset:234
-; ALIGNED-NEXT: buffer_load_ubyte v19, v4, s[0:3], 0 offen offset:240
-; ALIGNED-NEXT: buffer_load_ubyte v17, v4, s[0:3], 0 offen offset:241
-; ALIGNED-NEXT: buffer_load_ubyte v13, v4, s[0:3], 0 offen offset:242
-; ALIGNED-NEXT: buffer_load_ubyte v14, v4, s[0:3], 0 offen offset:243
-; ALIGNED-NEXT: buffer_load_ubyte v18, v4, s[0:3], 0 offen offset:244
-; ALIGNED-NEXT: buffer_load_ubyte v15, v4, s[0:3], 0 offen offset:245
-; ALIGNED-NEXT: buffer_load_ubyte v16, v4, s[0:3], 0 offen offset:246
-; ALIGNED-NEXT: buffer_load_ubyte v12, v4, s[0:3], 0 offen offset:247
-; ALIGNED-NEXT: buffer_load_ubyte v11, v4, s[0:3], 0 offen offset:252
-; ALIGNED-NEXT: buffer_load_ubyte v9, v4, s[0:3], 0 offen offset:253
-; ALIGNED-NEXT: buffer_load_ubyte v10, v4, s[0:3], 0 offen offset:254
-; ALIGNED-NEXT: buffer_load_ubyte v8, v4, s[0:3], 0 offen offset:255
-; ALIGNED-NEXT: buffer_load_ubyte v7, v4, s[0:3], 0 offen offset:251
-; ALIGNED-NEXT: buffer_load_ubyte v6, v4, s[0:3], 0 offen offset:248
-; ALIGNED-NEXT: buffer_load_ubyte v5, v4, s[0:3], 0 offen offset:249
-; ALIGNED-NEXT: s_clause 0x6
-; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:250
-; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen
-; ALIGNED-NEXT: buffer_load_ubyte v120, v4, s[0:3], 0 offen offset:2
-; ALIGNED-NEXT: buffer_load_ubyte v104, v4, s[0:3], 0 offen offset:4
-; ALIGNED-NEXT: buffer_load_ubyte v109, v4, s[0:3], 0 offen offset:5
-; ALIGNED-NEXT: buffer_load_ubyte v110, v4, s[0:3], 0 offen offset:6
-; ALIGNED-NEXT: buffer_load_ubyte v122, v4, s[0:3], 0 offen offset:7
-; ALIGNED-NEXT: s_waitcnt vmcnt(62)
-; ALIGNED-NEXT: v_lshl_or_b32 v2, v112, 8, v115
-; ALIGNED-NEXT: v_lshl_or_b32 v3, v100, 8, v101
; ALIGNED-NEXT: v_lshl_or_b32 v106, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v103, 8, v113
; ALIGNED-NEXT: v_lshl_or_b32 v3, v99, 8, v102
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v121, 8, v123
; ALIGNED-NEXT: v_lshl_or_b32 v90, v3, 16, v2
-; ALIGNED-NEXT: s_waitcnt vmcnt(60)
; ALIGNED-NEXT: v_lshl_or_b32 v2, v87, 8, v97
-; ALIGNED-NEXT: s_waitcnt vmcnt(58)
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v92, 8, v93
; ALIGNED-NEXT: v_lshl_or_b32 v3, v86, 8, v96
-; ALIGNED-NEXT: s_waitcnt vmcnt(14)
-; ALIGNED-NEXT: v_lshl_or_b32 v62, v12, 8, v16
-; ALIGNED-NEXT: s_waitcnt vmcnt(10)
-; ALIGNED-NEXT: v_lshl_or_b32 v76, v8, 8, v10
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: buffer_store_dword v104, off, s[0:3], s32 offset:1364 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v105, 8, v108
; ALIGNED-NEXT: v_lshl_or_b32 v88, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v83, 8, v84
; ALIGNED-NEXT: v_lshl_or_b32 v3, v85, 8, v82
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
-; ALIGNED-NEXT: buffer_store_dword v109, off, s[0:3], s32 offset:1380 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: buffer_store_dword v110, off, s[0:3], s32 offset:1384 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: buffer_store_dword v122, off, s[0:3], s32 offset:1392 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v91, 8, v94
; ALIGNED-NEXT: v_lshl_or_b32 v73, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v68, 8, v80
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v95, 8, v107
; ALIGNED-NEXT: v_lshl_or_b32 v3, v65, 8, v70
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v77, 8, v78
; ALIGNED-NEXT: v_lshl_or_b32 v45, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v67, 8, v71
; ALIGNED-NEXT: v_lshl_or_b32 v3, v52, 8, v53
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v79, 8, v89
; ALIGNED-NEXT: v_lshl_or_b32 v116, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v55, 8, v69
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v75, 8, v63
; ALIGNED-NEXT: v_lshl_or_b32 v3, v51, 8, v54
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v72, 8, v74
; ALIGNED-NEXT: v_lshl_or_b32 v114, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v38, 8, v50
; ALIGNED-NEXT: v_lshl_or_b32 v3, v66, 8, v39
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v56, 8, v47
; ALIGNED-NEXT: v_lshl_or_b32 v98, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v35, 8, v37
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v59, 8, v61
; ALIGNED-NEXT: v_lshl_or_b32 v3, v32, 8, v31
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v46, 8, v58
; ALIGNED-NEXT: v_lshl_or_b32 v81, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v33, 8, v36
; ALIGNED-NEXT: v_lshl_or_b32 v3, v30, 8, v34
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v57, 8, v60
; ALIGNED-NEXT: v_lshl_or_b32 v64, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v27, 8, v29
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v41, 8, v42
; ALIGNED-NEXT: v_lshl_or_b32 v3, v26, 8, v28
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v44
; ALIGNED-NEXT: v_lshl_or_b32 v49, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v22, 8, v24
; ALIGNED-NEXT: v_lshl_or_b32 v3, v23, 8, v21
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: v_lshl_or_b32 v1, v40, 8, v117
; ALIGNED-NEXT: v_lshl_or_b32 v48, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v2, v17, 8, v19
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v118, 8, v119
; ALIGNED-NEXT: v_lshl_or_b32 v3, v14, 8, v13
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; ALIGNED-NEXT: s_clause 0x2
+; ALIGNED-NEXT: buffer_load_ubyte v6, v4, s[0:3], 0 offen offset:248
+; ALIGNED-NEXT: buffer_load_ubyte v5, v4, s[0:3], 0 offen offset:249
+; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:250
; ALIGNED-NEXT: v_lshl_or_b32 v25, v3, 16, v2
; ALIGNED-NEXT: v_lshl_or_b32 v3, v15, 8, v18
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen
; ALIGNED-NEXT: v_lshl_or_b32 v20, v62, 16, v3
; ALIGNED-NEXT: v_lshl_or_b32 v62, v9, 8, v11
; ALIGNED-NEXT: v_lshl_or_b32 v3, v76, 16, v62
+; ALIGNED-NEXT: s_waitcnt vmcnt(2)
; ALIGNED-NEXT: v_lshl_or_b32 v62, v5, 8, v6
+; ALIGNED-NEXT: s_waitcnt vmcnt(1)
; ALIGNED-NEXT: v_lshl_or_b32 v76, v7, 8, v1
; ALIGNED-NEXT: v_lshl_or_b32 v2, v76, 16, v62
; ALIGNED-NEXT: s_clause 0x1
; ALIGNED-NEXT: buffer_load_ubyte v62, v4, s[0:3], 0 offen offset:1
; ALIGNED-NEXT: buffer_load_ubyte v76, v4, s[0:3], 0 offen offset:3
+; ALIGNED-NEXT: s_waitcnt vmcnt(2)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1336 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v120, off, s[0:3], s32 offset:1368 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
@@ -15027,48 +14879,47 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62
; ALIGNED-NEXT: v_lshl_or_b32 v62, v109, 8, v104
; ALIGNED-NEXT: v_lshl_or_b32 v76, v122, 8, v110
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1412 ; 4-byte Folded Spill
-; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:12
-; ALIGNED-NEXT: s_waitcnt vmcnt(0)
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1424 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_clause 0x3
-; ALIGNED-NEXT: buffer_load_ubyte v127, v4, s[0:3], 0 offen offset:13
-; ALIGNED-NEXT: buffer_load_ubyte v76, v4, s[0:3], 0 offen offset:14
+; ALIGNED-NEXT: s_clause 0x1
; ALIGNED-NEXT: buffer_load_ubyte v104, v4, s[0:3], 0 offen offset:15
; ALIGNED-NEXT: buffer_load_ubyte v120, v4, s[0:3], 0 offen offset:11
-; ALIGNED-NEXT: s_waitcnt vmcnt(3)
-; ALIGNED-NEXT: v_lshl_or_b32 v62, v127, 8, v0
-; ALIGNED-NEXT: s_waitcnt vmcnt(2)
-; ALIGNED-NEXT: buffer_store_dword v76, off, s[0:3], s32 offset:1428 ; 4-byte Folded Spill
-; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v76, v104, 8, v76
-; ALIGNED-NEXT: buffer_store_dword v104, off, s[0:3], s32 offset:1432 ; 4-byte Folded Spill
-; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_clause 0x2
; ALIGNED-NEXT: buffer_load_ubyte v122, v4, s[0:3], 0 offen offset:8
; ALIGNED-NEXT: buffer_load_ubyte v110, v4, s[0:3], 0 offen offset:9
; ALIGNED-NEXT: buffer_load_ubyte v109, v4, s[0:3], 0 offen offset:10
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62
+; ALIGNED-NEXT: s_clause 0x1
+; ALIGNED-NEXT: buffer_load_ubyte v127, v4, s[0:3], 0 offen offset:13
+; ALIGNED-NEXT: buffer_load_ubyte v76, v4, s[0:3], 0 offen offset:14
+; ALIGNED-NEXT: buffer_store_dword v125, off, s[0:3], s32 offset:1452 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1412 ; 4-byte Folded Spill
+; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:12
+; ALIGNED-NEXT: s_waitcnt vmcnt(7)
+; ALIGNED-NEXT: buffer_store_dword v104, off, s[0:3], s32 offset:1432 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_waitcnt vmcnt(1)
-; ALIGNED-NEXT: v_lshl_or_b32 v62, v110, 8, v122
+; ALIGNED-NEXT: buffer_store_dword v76, off, s[0:3], s32 offset:1428 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v76, v104, 8, v76
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
+; ALIGNED-NEXT: v_lshl_or_b32 v62, v127, 8, v0
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1424 ; 4-byte Folded Spill
+; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62
+; ALIGNED-NEXT: v_lshl_or_b32 v62, v110, 8, v122
; ALIGNED-NEXT: v_lshl_or_b32 v76, v120, 8, v109
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Spill
; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62
-; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Spill
; ALIGNED-NEXT: s_clause 0x2
; ALIGNED-NEXT: buffer_load_ubyte v62, v4, s[0:3], 0 offen offset:18
; ALIGNED-NEXT: buffer_load_ubyte v104, v4, s[0:3], 0 offen offset:16
; ALIGNED-NEXT: buffer_load_ubyte v76, v4, s[0:3], 0 offen offset:17
+; ALIGNED-NEXT: v_add_nc_u32_e32 v4, 0xffffff00, v4
+; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Spill
; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:488
; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:492
; ALIGNED-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:484
; ALIGNED-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:480
-; ALIGNED-NEXT: s_clause 0x1
+; ALIGNED-NEXT: s_clause 0x1 ; 8-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704
; ALIGNED-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:708
-; ALIGNED-NEXT: v_add_nc_u32_e32 v4, 0xffffff00, v4
; ALIGNED-NEXT: s_waitcnt vmcnt(4)
; ALIGNED-NEXT: v_lshl_or_b32 v0, v125, 8, v62
; ALIGNED-NEXT: s_waitcnt vmcnt(2)
@@ -15137,6 +14988,8 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_store_dword v88, off, s[0:3], s32 offset:476
; ALIGNED-NEXT: buffer_store_dword v90, off, s[0:3], s32 offset:468
; ALIGNED-NEXT: buffer_store_dword v106, off, s[0:3], s32 offset:464
+; ALIGNED-NEXT: v_lshl_or_b32 v125, v0, 16, v125
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[2:3], v82 offset:202
; ALIGNED-NEXT: flat_store_byte v[2:3], v85 offset:203
; ALIGNED-NEXT: flat_store_byte v[2:3], v83 offset:201
@@ -15153,8 +15006,6 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: flat_store_byte v[2:3], v102 offset:198
; ALIGNED-NEXT: flat_store_byte v[2:3], v113 offset:196
; ALIGNED-NEXT: flat_store_byte v[2:3], v115 offset:192
-; ALIGNED-NEXT: v_lshl_or_b32 v125, v0, 16, v125
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_add_u32 s4, s4, 0xffffff00
; ALIGNED-NEXT: s_addc_u32 s5, s5, -1
; ALIGNED-NEXT: s_cmp_eq_u64 s[4:5], s[6:7]
@@ -15169,6 +15020,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[2:3], v117 offset:186
; ALIGNED-NEXT: flat_store_byte v[2:3], v40 offset:187
; ALIGNED-NEXT: flat_store_byte v[2:3], v118 offset:185
@@ -15185,7 +15037,6 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: flat_store_byte v[2:3], v58 offset:182
; ALIGNED-NEXT: flat_store_byte v[2:3], v60 offset:180
; ALIGNED-NEXT: flat_store_byte v[2:3], v61 offset:176
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Reload
@@ -15197,6 +15048,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[2:3], v63 offset:170
; ALIGNED-NEXT: flat_store_byte v[2:3], v75 offset:171
; ALIGNED-NEXT: flat_store_byte v[2:3], v72 offset:169
@@ -15213,7 +15065,6 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: flat_store_byte v[2:3], v94 offset:166
; ALIGNED-NEXT: flat_store_byte v[2:3], v107 offset:164
; ALIGNED-NEXT: flat_store_byte v[2:3], v108 offset:160
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Reload
@@ -15225,11 +15076,11 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1416 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[2:3], v111 offset:154
; ALIGNED-NEXT: flat_store_byte v[2:3], v124 offset:155
; ALIGNED-NEXT: flat_store_byte v[2:3], v121 offset:153
; ALIGNED-NEXT: flat_store_byte v[2:3], v126 offset:159
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1416 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:157
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1420 ; 4-byte Folded Reload
@@ -15747,11 +15598,11 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:656
+; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1432 ; 4-byte Folded Reload
; ALIGNED-NEXT: flat_store_byte v[2:3], v109 offset:10
; ALIGNED-NEXT: flat_store_byte v[2:3], v120 offset:11
; ALIGNED-NEXT: flat_store_byte v[2:3], v127 offset:13
; ALIGNED-NEXT: flat_store_byte v[2:3], v110 offset:9
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1432 ; 4-byte Folded Reload
; ALIGNED-NEXT: s_waitcnt vmcnt(0)
; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:15
; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1428 ; 4-byte Folded Reload
@@ -15788,7 +15639,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5
; ALIGNED-NEXT: s_cbranch_scc0 .LBB9_4
; ALIGNED-NEXT: .LBB9_5: ; %Flow11
; ALIGNED-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; ALIGNED-NEXT: s_clause 0x2f
+; ALIGNED-NEXT: s_clause 0x2f ; 192-byte Folded Reload
; ALIGNED-NEXT: buffer_load_dword v127, off, s[0:3], s32
; ALIGNED-NEXT: buffer_load_dword v126, off, s[0:3], s32 offset:4
; ALIGNED-NEXT: buffer_load_dword v125, off, s[0:3], s32 offset:8
diff --git a/llvm/test/CodeGen/AMDGPU/mixed-vmem-types.ll b/llvm/test/CodeGen/AMDGPU/mixed-vmem-types.ll
index 71900a4..3280048 100644
--- a/llvm/test/CodeGen/AMDGPU/mixed-vmem-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/mixed-vmem-types.ll
@@ -90,19 +90,19 @@ define amdgpu_cs void @mixed_vmem_types(i32 inreg %globalTable, i32 inreg %perSh
; GFX12-GISEL-NEXT: s_load_b256 s[20:27], s[2:3], 0x40
; GFX12-GISEL-NEXT: s_load_b512 s[36:51], s[2:3], 0x0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: image_sample_lz v1, v0, s[8:15], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX12-GISEL-NEXT: buffer_load_b32 v2, off, s[16:19], null
; GFX12-GISEL-NEXT: buffer_load_b32 v3, off, s[20:23], null
; GFX12-GISEL-NEXT: buffer_load_b32 v4, off, s[40:43], null
+; GFX12-GISEL-NEXT: image_sample_lz v1, v0, s[8:15], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX12-GISEL-NEXT: image_sample_lz v0, v0, s[44:51], s[36:39] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
; GFX12-GISEL-NEXT: s_wait_loadcnt 0x2
; GFX12-GISEL-NEXT: v_cmp_eq_u32_e64 s0, 0xac0, v2
-; GFX12-GISEL-NEXT: s_wait_samplecnt 0x1
-; GFX12-GISEL-NEXT: v_cmp_eq_f32_e32 vcc_lo, 1.0, v1
; GFX12-GISEL-NEXT: s_wait_loadcnt 0x1
; GFX12-GISEL-NEXT: v_cmp_eq_u32_e64 s1, 0xac0, v3
; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX12-GISEL-NEXT: v_cmp_eq_u32_e64 s2, 0xac0, v4
+; GFX12-GISEL-NEXT: s_wait_samplecnt 0x1
+; GFX12-GISEL-NEXT: v_cmp_eq_f32_e32 vcc_lo, 1.0, v1
; GFX12-GISEL-NEXT: s_and_b32 s0, s0, vcc_lo
; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
; GFX12-GISEL-NEXT: v_cmp_eq_f32_e32 vcc_lo, 0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
index 78207c2..1177474 100644
--- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
+++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
@@ -185,44 +185,47 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX900: ; %bb.0:
; GFX900-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX900-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GFX900-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
+; GFX900-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: global_load_dwordx4 v[25:28], v0, s[0:1] offset:112
+; GFX900-NEXT: global_load_dwordx4 v[29:32], v0, s[0:1] offset:96
; GFX900-NEXT: global_load_dwordx4 v[1:4], v0, s[0:1] offset:16
; GFX900-NEXT: global_load_dwordx4 v[5:8], v0, s[0:1]
; GFX900-NEXT: global_load_dwordx4 v[9:12], v0, s[0:1] offset:48
; GFX900-NEXT: global_load_dwordx4 v[13:16], v0, s[0:1] offset:32
; GFX900-NEXT: global_load_dwordx4 v[17:20], v0, s[0:1] offset:80
; GFX900-NEXT: global_load_dwordx4 v[21:24], v0, s[0:1] offset:64
-; GFX900-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
-; GFX900-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
-; GFX900-NEXT: global_load_dwordx4 v[25:28], v0, s[0:1] offset:112
-; GFX900-NEXT: global_load_dwordx4 v[29:32], v0, s[0:1] offset:96
-; GFX900-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; GFX900-NEXT: s_waitcnt vmcnt(5)
; GFX900-NEXT: v_add_f32_e32 v4, s43, v4
; GFX900-NEXT: v_add_f32_e32 v3, s42, v3
; GFX900-NEXT: v_add_f32_e32 v2, s41, v2
; GFX900-NEXT: v_add_f32_e32 v1, s40, v1
-; GFX900-NEXT: s_waitcnt vmcnt(6)
-; GFX900-NEXT: v_add_f32_e32 v8, s39, v8
-; GFX900-NEXT: v_add_f32_e32 v7, s38, v7
-; GFX900-NEXT: v_add_f32_e32 v6, s37, v6
-; GFX900-NEXT: s_waitcnt vmcnt(0)
; GFX900-NEXT: v_add_f32_e32 v32, s19, v32
; GFX900-NEXT: v_add_f32_e32 v31, s18, v31
; GFX900-NEXT: v_add_f32_e32 v30, s17, v30
; GFX900-NEXT: v_add_f32_e32 v29, s16, v29
+; GFX900-NEXT: s_waitcnt vmcnt(4)
+; GFX900-NEXT: v_add_f32_e32 v8, s39, v8
+; GFX900-NEXT: v_add_f32_e32 v7, s38, v7
+; GFX900-NEXT: v_add_f32_e32 v6, s37, v6
; GFX900-NEXT: v_add_f32_e32 v5, s36, v5
+; GFX900-NEXT: s_waitcnt vmcnt(3)
; GFX900-NEXT: v_add_f32_e32 v12, s51, v12
; GFX900-NEXT: v_add_f32_e32 v11, s50, v11
; GFX900-NEXT: v_add_f32_e32 v10, s49, v10
; GFX900-NEXT: v_add_f32_e32 v9, s48, v9
+; GFX900-NEXT: s_waitcnt vmcnt(2)
; GFX900-NEXT: v_add_f32_e32 v16, s47, v16
; GFX900-NEXT: v_add_f32_e32 v15, s46, v15
; GFX900-NEXT: v_add_f32_e32 v14, s45, v14
; GFX900-NEXT: v_add_f32_e32 v13, s44, v13
+; GFX900-NEXT: s_waitcnt vmcnt(1)
; GFX900-NEXT: v_add_f32_e32 v20, s15, v20
; GFX900-NEXT: v_add_f32_e32 v19, s14, v19
; GFX900-NEXT: v_add_f32_e32 v18, s13, v18
; GFX900-NEXT: v_add_f32_e32 v17, s12, v17
+; GFX900-NEXT: s_waitcnt vmcnt(0)
; GFX900-NEXT: v_add_f32_e32 v24, s11, v24
; GFX900-NEXT: v_add_f32_e32 v23, s10, v23
; GFX900-NEXT: v_add_f32_e32 v22, s9, v22
@@ -246,6 +249,8 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; PACKED-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; PACKED-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; PACKED-SDAG-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
+; PACKED-SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
; PACKED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; PACKED-SDAG-NEXT: global_load_dwordx4 v[0:3], v32, s[0:1] offset:16
; PACKED-SDAG-NEXT: global_load_dwordx4 v[4:7], v32, s[0:1]
@@ -255,9 +260,7 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-SDAG-NEXT: global_load_dwordx4 v[12:15], v32, s[0:1] offset:64
; PACKED-SDAG-NEXT: global_load_dwordx4 v[24:27], v32, s[0:1] offset:112
; PACKED-SDAG-NEXT: global_load_dwordx4 v[28:31], v32, s[0:1] offset:96
-; PACKED-SDAG-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
-; PACKED-SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
-; PACKED-SDAG-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; PACKED-SDAG-NEXT: s_waitcnt vmcnt(7)
; PACKED-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[40:41]
; PACKED-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[42:43]
; PACKED-SDAG-NEXT: s_waitcnt vmcnt(6)
@@ -293,6 +296,8 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; PACKED-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; PACKED-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; PACKED-GISEL-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
+; PACKED-GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
; PACKED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; PACKED-GISEL-NEXT: global_load_dwordx4 v[0:3], v32, s[0:1]
; PACKED-GISEL-NEXT: global_load_dwordx4 v[4:7], v32, s[0:1] offset:16
@@ -302,9 +307,7 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-GISEL-NEXT: global_load_dwordx4 v[20:23], v32, s[0:1] offset:80
; PACKED-GISEL-NEXT: global_load_dwordx4 v[24:27], v32, s[0:1] offset:96
; PACKED-GISEL-NEXT: global_load_dwordx4 v[28:31], v32, s[0:1] offset:112
-; PACKED-GISEL-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
-; PACKED-GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
-; PACKED-GISEL-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; PACKED-GISEL-NEXT: s_waitcnt vmcnt(7)
; PACKED-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[36:37]
; PACKED-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[38:39]
; PACKED-GISEL-NEXT: s_waitcnt vmcnt(6)
@@ -340,11 +343,14 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fadd_v32_vs:
; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_clause 0x2
; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_lshlrev_b32 v56, 7, v0 :: v_dual_mov_b32 v32, s40
; GFX1250-SDAG-NEXT: s_clause 0x7
; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16
; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48
@@ -354,22 +360,18 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96
; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64
; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
-; GFX1250-SDAG-NEXT: s_clause 0x1
-; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
-; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s40 :: v_dual_mov_b32 v33, s41
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s42 :: v_dual_mov_b32 v35, s43
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s38 :: v_dual_mov_b32 v39, s49
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s50 :: v_dual_mov_b32 v41, s51
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s44 :: v_dual_mov_b32 v37, s39
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s48 :: v_dual_mov_b32 v55, s23
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s11 :: v_dual_mov_b32 v52, s20
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s21 :: v_dual_mov_b32 v54, s22
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s15 :: v_dual_mov_b32 v50, s10
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s47 :: v_dual_mov_b32 v46, s12
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s13 :: v_dual_mov_b32 v48, s14
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s45 :: v_dual_mov_b32 v44, s46
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v33, s41 :: v_dual_mov_b32 v34, s42
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v35, s43 :: v_dual_mov_b32 v36, s38
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v39, s49 :: v_dual_mov_b32 v40, s50
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v41, s51 :: v_dual_mov_b32 v42, s44
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v37, s39 :: v_dual_mov_b32 v38, s48
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s23 :: v_dual_mov_b32 v51, s11
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v52, s20 :: v_dual_mov_b32 v53, s21
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v54, s22 :: v_dual_mov_b32 v49, s15
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v50, s10 :: v_dual_mov_b32 v45, s47
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v46, s12 :: v_dual_mov_b32 v47, s13
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v48, s14 :: v_dual_mov_b32 v43, s45
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v44, s46
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33]
; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35]
@@ -409,6 +411,9 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -421,10 +426,6 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80
; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96
; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
-; GFX1250-GISEL-NEXT: s_clause 0x1
-; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
-; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
-; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37]
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39]
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41]
@@ -1442,44 +1443,47 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX900: ; %bb.0:
; GFX900-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX900-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GFX900-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
+; GFX900-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: global_load_dwordx4 v[25:28], v0, s[0:1] offset:112
+; GFX900-NEXT: global_load_dwordx4 v[29:32], v0, s[0:1] offset:96
; GFX900-NEXT: global_load_dwordx4 v[1:4], v0, s[0:1] offset:16
; GFX900-NEXT: global_load_dwordx4 v[5:8], v0, s[0:1]
; GFX900-NEXT: global_load_dwordx4 v[9:12], v0, s[0:1] offset:48
; GFX900-NEXT: global_load_dwordx4 v[13:16], v0, s[0:1] offset:32
; GFX900-NEXT: global_load_dwordx4 v[17:20], v0, s[0:1] offset:80
; GFX900-NEXT: global_load_dwordx4 v[21:24], v0, s[0:1] offset:64
-; GFX900-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
-; GFX900-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
-; GFX900-NEXT: global_load_dwordx4 v[25:28], v0, s[0:1] offset:112
-; GFX900-NEXT: global_load_dwordx4 v[29:32], v0, s[0:1] offset:96
-; GFX900-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; GFX900-NEXT: s_waitcnt vmcnt(5)
; GFX900-NEXT: v_mul_f32_e32 v4, s43, v4
; GFX900-NEXT: v_mul_f32_e32 v3, s42, v3
; GFX900-NEXT: v_mul_f32_e32 v2, s41, v2
; GFX900-NEXT: v_mul_f32_e32 v1, s40, v1
-; GFX900-NEXT: s_waitcnt vmcnt(6)
-; GFX900-NEXT: v_mul_f32_e32 v8, s39, v8
-; GFX900-NEXT: v_mul_f32_e32 v7, s38, v7
-; GFX900-NEXT: v_mul_f32_e32 v6, s37, v6
-; GFX900-NEXT: s_waitcnt vmcnt(0)
; GFX900-NEXT: v_mul_f32_e32 v32, s19, v32
; GFX900-NEXT: v_mul_f32_e32 v31, s18, v31
; GFX900-NEXT: v_mul_f32_e32 v30, s17, v30
; GFX900-NEXT: v_mul_f32_e32 v29, s16, v29
+; GFX900-NEXT: s_waitcnt vmcnt(4)
+; GFX900-NEXT: v_mul_f32_e32 v8, s39, v8
+; GFX900-NEXT: v_mul_f32_e32 v7, s38, v7
+; GFX900-NEXT: v_mul_f32_e32 v6, s37, v6
; GFX900-NEXT: v_mul_f32_e32 v5, s36, v5
+; GFX900-NEXT: s_waitcnt vmcnt(3)
; GFX900-NEXT: v_mul_f32_e32 v12, s51, v12
; GFX900-NEXT: v_mul_f32_e32 v11, s50, v11
; GFX900-NEXT: v_mul_f32_e32 v10, s49, v10
; GFX900-NEXT: v_mul_f32_e32 v9, s48, v9
+; GFX900-NEXT: s_waitcnt vmcnt(2)
; GFX900-NEXT: v_mul_f32_e32 v16, s47, v16
; GFX900-NEXT: v_mul_f32_e32 v15, s46, v15
; GFX900-NEXT: v_mul_f32_e32 v14, s45, v14
; GFX900-NEXT: v_mul_f32_e32 v13, s44, v13
+; GFX900-NEXT: s_waitcnt vmcnt(1)
; GFX900-NEXT: v_mul_f32_e32 v20, s15, v20
; GFX900-NEXT: v_mul_f32_e32 v19, s14, v19
; GFX900-NEXT: v_mul_f32_e32 v18, s13, v18
; GFX900-NEXT: v_mul_f32_e32 v17, s12, v17
+; GFX900-NEXT: s_waitcnt vmcnt(0)
; GFX900-NEXT: v_mul_f32_e32 v24, s11, v24
; GFX900-NEXT: v_mul_f32_e32 v23, s10, v23
; GFX900-NEXT: v_mul_f32_e32 v22, s9, v22
@@ -1503,6 +1507,8 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; PACKED-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; PACKED-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; PACKED-SDAG-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
+; PACKED-SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
; PACKED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; PACKED-SDAG-NEXT: global_load_dwordx4 v[0:3], v32, s[0:1] offset:16
; PACKED-SDAG-NEXT: global_load_dwordx4 v[4:7], v32, s[0:1]
@@ -1512,9 +1518,7 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-SDAG-NEXT: global_load_dwordx4 v[12:15], v32, s[0:1] offset:64
; PACKED-SDAG-NEXT: global_load_dwordx4 v[24:27], v32, s[0:1] offset:112
; PACKED-SDAG-NEXT: global_load_dwordx4 v[28:31], v32, s[0:1] offset:96
-; PACKED-SDAG-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
-; PACKED-SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
-; PACKED-SDAG-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; PACKED-SDAG-NEXT: s_waitcnt vmcnt(7)
; PACKED-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[40:41]
; PACKED-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[42:43]
; PACKED-SDAG-NEXT: s_waitcnt vmcnt(6)
@@ -1550,6 +1554,8 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; PACKED-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; PACKED-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; PACKED-GISEL-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
+; PACKED-GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
; PACKED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; PACKED-GISEL-NEXT: global_load_dwordx4 v[0:3], v32, s[0:1]
; PACKED-GISEL-NEXT: global_load_dwordx4 v[4:7], v32, s[0:1] offset:16
@@ -1559,9 +1565,7 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-GISEL-NEXT: global_load_dwordx4 v[20:23], v32, s[0:1] offset:80
; PACKED-GISEL-NEXT: global_load_dwordx4 v[24:27], v32, s[0:1] offset:96
; PACKED-GISEL-NEXT: global_load_dwordx4 v[28:31], v32, s[0:1] offset:112
-; PACKED-GISEL-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
-; PACKED-GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
-; PACKED-GISEL-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; PACKED-GISEL-NEXT: s_waitcnt vmcnt(7)
; PACKED-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[36:37]
; PACKED-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[38:39]
; PACKED-GISEL-NEXT: s_waitcnt vmcnt(6)
@@ -1597,11 +1601,14 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fmul_v32_vs:
; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_clause 0x2
; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_lshlrev_b32 v56, 7, v0 :: v_dual_mov_b32 v32, s40
; GFX1250-SDAG-NEXT: s_clause 0x7
; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16
; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48
@@ -1611,22 +1618,18 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96
; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64
; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
-; GFX1250-SDAG-NEXT: s_clause 0x1
-; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
-; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s40 :: v_dual_mov_b32 v33, s41
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s42 :: v_dual_mov_b32 v35, s43
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s38 :: v_dual_mov_b32 v39, s49
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s50 :: v_dual_mov_b32 v41, s51
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s44 :: v_dual_mov_b32 v37, s39
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s48 :: v_dual_mov_b32 v55, s23
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s11 :: v_dual_mov_b32 v52, s20
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s21 :: v_dual_mov_b32 v54, s22
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s15 :: v_dual_mov_b32 v50, s10
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s47 :: v_dual_mov_b32 v46, s12
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s13 :: v_dual_mov_b32 v48, s14
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s45 :: v_dual_mov_b32 v44, s46
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v33, s41 :: v_dual_mov_b32 v34, s42
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v35, s43 :: v_dual_mov_b32 v36, s38
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v39, s49 :: v_dual_mov_b32 v40, s50
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v41, s51 :: v_dual_mov_b32 v42, s44
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v37, s39 :: v_dual_mov_b32 v38, s48
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s23 :: v_dual_mov_b32 v51, s11
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v52, s20 :: v_dual_mov_b32 v53, s21
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v54, s22 :: v_dual_mov_b32 v49, s15
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v50, s10 :: v_dual_mov_b32 v45, s47
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v46, s12 :: v_dual_mov_b32 v47, s13
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v48, s14 :: v_dual_mov_b32 v43, s45
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v44, s46
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33]
; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35]
@@ -1666,6 +1669,9 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -1678,10 +1684,6 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80
; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96
; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
-; GFX1250-GISEL-NEXT: s_clause 0x1
-; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
-; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
-; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37]
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39]
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41]
@@ -2273,44 +2275,47 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX900: ; %bb.0:
; GFX900-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX900-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; GFX900-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
+; GFX900-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: global_load_dwordx4 v[25:28], v0, s[0:1] offset:112
+; GFX900-NEXT: global_load_dwordx4 v[29:32], v0, s[0:1] offset:96
; GFX900-NEXT: global_load_dwordx4 v[1:4], v0, s[0:1] offset:16
; GFX900-NEXT: global_load_dwordx4 v[5:8], v0, s[0:1]
; GFX900-NEXT: global_load_dwordx4 v[9:12], v0, s[0:1] offset:48
; GFX900-NEXT: global_load_dwordx4 v[13:16], v0, s[0:1] offset:32
; GFX900-NEXT: global_load_dwordx4 v[17:20], v0, s[0:1] offset:80
; GFX900-NEXT: global_load_dwordx4 v[21:24], v0, s[0:1] offset:64
-; GFX900-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
-; GFX900-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
-; GFX900-NEXT: global_load_dwordx4 v[25:28], v0, s[0:1] offset:112
-; GFX900-NEXT: global_load_dwordx4 v[29:32], v0, s[0:1] offset:96
-; GFX900-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; GFX900-NEXT: s_waitcnt vmcnt(5)
; GFX900-NEXT: v_fma_f32 v4, v4, s43, s43
; GFX900-NEXT: v_fma_f32 v3, v3, s42, s42
; GFX900-NEXT: v_fma_f32 v2, v2, s41, s41
; GFX900-NEXT: v_fma_f32 v1, v1, s40, s40
-; GFX900-NEXT: s_waitcnt vmcnt(6)
-; GFX900-NEXT: v_fma_f32 v8, v8, s39, s39
-; GFX900-NEXT: v_fma_f32 v7, v7, s38, s38
-; GFX900-NEXT: v_fma_f32 v6, v6, s37, s37
-; GFX900-NEXT: s_waitcnt vmcnt(0)
; GFX900-NEXT: v_fma_f32 v32, v32, s19, s19
; GFX900-NEXT: v_fma_f32 v31, v31, s18, s18
; GFX900-NEXT: v_fma_f32 v30, v30, s17, s17
; GFX900-NEXT: v_fma_f32 v29, v29, s16, s16
+; GFX900-NEXT: s_waitcnt vmcnt(4)
+; GFX900-NEXT: v_fma_f32 v8, v8, s39, s39
+; GFX900-NEXT: v_fma_f32 v7, v7, s38, s38
+; GFX900-NEXT: v_fma_f32 v6, v6, s37, s37
; GFX900-NEXT: v_fma_f32 v5, v5, s36, s36
+; GFX900-NEXT: s_waitcnt vmcnt(3)
; GFX900-NEXT: v_fma_f32 v12, v12, s51, s51
; GFX900-NEXT: v_fma_f32 v11, v11, s50, s50
; GFX900-NEXT: v_fma_f32 v10, v10, s49, s49
; GFX900-NEXT: v_fma_f32 v9, v9, s48, s48
+; GFX900-NEXT: s_waitcnt vmcnt(2)
; GFX900-NEXT: v_fma_f32 v16, v16, s47, s47
; GFX900-NEXT: v_fma_f32 v15, v15, s46, s46
; GFX900-NEXT: v_fma_f32 v14, v14, s45, s45
; GFX900-NEXT: v_fma_f32 v13, v13, s44, s44
+; GFX900-NEXT: s_waitcnt vmcnt(1)
; GFX900-NEXT: v_fma_f32 v20, v20, s15, s15
; GFX900-NEXT: v_fma_f32 v19, v19, s14, s14
; GFX900-NEXT: v_fma_f32 v18, v18, s13, s13
; GFX900-NEXT: v_fma_f32 v17, v17, s12, s12
+; GFX900-NEXT: s_waitcnt vmcnt(0)
; GFX900-NEXT: v_fma_f32 v24, v24, s11, s11
; GFX900-NEXT: v_fma_f32 v23, v23, s10, s10
; GFX900-NEXT: v_fma_f32 v22, v22, s9, s9
@@ -2334,6 +2339,8 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; PACKED-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; PACKED-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; PACKED-SDAG-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
+; PACKED-SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
; PACKED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; PACKED-SDAG-NEXT: global_load_dwordx4 v[0:3], v32, s[0:1] offset:16
; PACKED-SDAG-NEXT: global_load_dwordx4 v[4:7], v32, s[0:1]
@@ -2343,9 +2350,7 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-SDAG-NEXT: global_load_dwordx4 v[12:15], v32, s[0:1] offset:64
; PACKED-SDAG-NEXT: global_load_dwordx4 v[24:27], v32, s[0:1] offset:112
; PACKED-SDAG-NEXT: global_load_dwordx4 v[28:31], v32, s[0:1] offset:96
-; PACKED-SDAG-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
-; PACKED-SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
-; PACKED-SDAG-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; PACKED-SDAG-NEXT: s_waitcnt vmcnt(7)
; PACKED-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[40:41], s[40:41]
; PACKED-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[42:43], s[42:43]
; PACKED-SDAG-NEXT: s_waitcnt vmcnt(6)
@@ -2381,6 +2386,8 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; PACKED-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; PACKED-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; PACKED-GISEL-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
+; PACKED-GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
; PACKED-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; PACKED-GISEL-NEXT: global_load_dwordx4 v[0:3], v32, s[0:1]
; PACKED-GISEL-NEXT: global_load_dwordx4 v[4:7], v32, s[0:1] offset:16
@@ -2390,9 +2397,7 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; PACKED-GISEL-NEXT: global_load_dwordx4 v[20:23], v32, s[0:1] offset:80
; PACKED-GISEL-NEXT: global_load_dwordx4 v[24:27], v32, s[0:1] offset:96
; PACKED-GISEL-NEXT: global_load_dwordx4 v[28:31], v32, s[0:1] offset:112
-; PACKED-GISEL-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0xa4
-; PACKED-GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0xe4
-; PACKED-GISEL-NEXT: s_waitcnt vmcnt(7) lgkmcnt(0)
+; PACKED-GISEL-NEXT: s_waitcnt vmcnt(7)
; PACKED-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[36:37], s[36:37]
; PACKED-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[38:39], s[38:39]
; PACKED-GISEL-NEXT: s_waitcnt vmcnt(6)
@@ -2430,6 +2435,9 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
@@ -2442,10 +2450,6 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96
; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64
; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
-; GFX1250-SDAG-NEXT: s_clause 0x1
-; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
-; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[40:41]
; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[42:43]
; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[50:51]
@@ -2496,6 +2500,9 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
+; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
@@ -2508,10 +2515,6 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80
; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96
; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112
-; GFX1250-GISEL-NEXT: s_clause 0x1
-; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4
-; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4
-; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37]
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39]
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41]
diff --git a/llvm/test/CodeGen/AMDGPU/postra-bundle-memops.mir b/llvm/test/CodeGen/AMDGPU/postra-bundle-memops.mir
index d0d5cc1..025d9e6 100644
--- a/llvm/test/CodeGen/AMDGPU/postra-bundle-memops.mir
+++ b/llvm/test/CodeGen/AMDGPU/postra-bundle-memops.mir
@@ -56,11 +56,11 @@ body: |
; GCN-NEXT: BUFFER_STORE_DWORD_ADDR64 $vgpr0, $vgpr2_vgpr3, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec
; GCN-NEXT: BUFFER_STORE_DWORD_ADDR64 $vgpr0, $vgpr2_vgpr3, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec
; GCN-NEXT: }
- ; GCN-NEXT: BUNDLE implicit-def $vgpr2, implicit-def $vgpr3, implicit undef $vgpr4_vgpr5_vgpr6_vgpr7, implicit undef $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $exec {
+ ; GCN-NEXT: BUNDLE implicit-def $vgpr2, implicit-def $vgpr3, implicit undef $vgpr4_vgpr5_vgpr6_vgpr7, implicit undef $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $exec :: (load (s32)) {
; GCN-NEXT: $vgpr2 = IMAGE_LOAD_V1_V4 undef $vgpr4_vgpr5_vgpr6_vgpr7, undef $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, 2, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (load (s32))
; GCN-NEXT: $vgpr3 = IMAGE_LOAD_V1_V4 undef $vgpr4_vgpr5_vgpr6_vgpr7, undef $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, 2, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (load (s32))
; GCN-NEXT: }
- ; GCN-NEXT: BUNDLE implicit undef $vgpr0_vgpr1_vgpr2_vgpr3, implicit $vgpr0_vgpr1, implicit undef $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $exec {
+ ; GCN-NEXT: BUNDLE implicit undef $vgpr0_vgpr1_vgpr2_vgpr3, implicit $vgpr0_vgpr1, implicit undef $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $exec :: (store (s128)) {
; GCN-NEXT: IMAGE_STORE_V4_V2 undef $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr0_vgpr1, undef $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, 15, -1, 1, 0, 0, 0, 0, 0, implicit $exec :: (store (s128))
; GCN-NEXT: IMAGE_STORE_V4_V2 undef $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr0_vgpr1, undef $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, 15, -1, 1, 0, 0, 0, 0, 0, implicit $exec :: (store (s128))
; GCN-NEXT: }
@@ -359,6 +359,7 @@ tracksRegLiveness: true
body: |
bb.0:
; GCN-LABLE: name: no_sched_barrier_within_bundle
+ ; GCN-LABEL: name: no_sched_barrier_within_bundle
; GCN: renamable $sgpr0_sgpr1 = IMPLICIT_DEF
; GCN-NEXT: renamable $vgpr0 = IMPLICIT_DEF
; GCN-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit $sgpr0_sgpr1, implicit $vgpr0, implicit $exec {
diff --git a/llvm/test/CodeGen/AMDGPU/postra-bundle-vimage-vsample-gfx12.mir b/llvm/test/CodeGen/AMDGPU/postra-bundle-vimage-vsample-gfx12.mir
index 5fea0ae..e0266b9 100644
--- a/llvm/test/CodeGen/AMDGPU/postra-bundle-vimage-vsample-gfx12.mir
+++ b/llvm/test/CodeGen/AMDGPU/postra-bundle-vimage-vsample-gfx12.mir
@@ -9,7 +9,7 @@ body: |
; GFX12-LABEL: name: post_bundle_vimage
; GFX12: liveins: $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUNDLE implicit-def $vgpr5, implicit-def $vgpr4, implicit killed $vgpr1, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $exec {
+ ; GFX12-NEXT: BUNDLE implicit-def $vgpr5, implicit-def $vgpr4, implicit killed $vgpr1, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $exec :: (dereferenceable invariant load (s32), addrspace 8) {
; GFX12-NEXT: $vgpr5 = IMAGE_LOAD_V1_V1_gfx12 $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, 2, 1, 0, 0, -1, 0, 0, implicit $exec :: (dereferenceable invariant load (s32), addrspace 8)
; GFX12-NEXT: $vgpr4 = IMAGE_LOAD_V1_V1_gfx12 killed $vgpr1, killed $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, 1, 1, 0, 0, -1, 0, 0, implicit $exec :: (dereferenceable invariant load (s32), addrspace 8)
; GFX12-NEXT: }
@@ -25,7 +25,7 @@ body: |
; GFX12-LABEL: name: post_bundle_vsample
; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11
; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUNDLE implicit-def $vgpr6_vgpr7_vgpr8_vgpr9, implicit-def $vgpr10_vgpr11_vgpr12_vgpr13, implicit killed $vgpr0, implicit killed $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec, implicit killed $vgpr2, implicit killed $vgpr3 {
+ ; GFX12-NEXT: BUNDLE implicit-def $vgpr6_vgpr7_vgpr8_vgpr9, implicit-def $vgpr10_vgpr11_vgpr12_vgpr13, implicit killed $vgpr0, implicit killed $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec, implicit killed $vgpr2, implicit killed $vgpr3 :: (dereferenceable load (s128), addrspace 8) {
; GFX12-NEXT: $vgpr6_vgpr7_vgpr8_vgpr9 = IMAGE_SAMPLE_V4_V2_gfx12 killed $vgpr0, killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 15, 1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), addrspace 8)
; GFX12-NEXT: $vgpr10_vgpr11_vgpr12_vgpr13 = IMAGE_SAMPLE_V4_V2_gfx12 killed $vgpr2, killed $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9_sgpr10_sgpr11, 15, 1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), addrspace 8)
; GFX12-NEXT: }
diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index 85a9aba..b91bdd2 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -398,11 +398,11 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX8-NEXT: flat_load_dwordx2 v[18:19], v[4:5]
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0xffffc800, v2
; GFX8-NEXT: v_addc_u32_e32 v7, vcc, -1, v3, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[6:7], v[6:7]
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffd000, v2
; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc
; GFX8-NEXT: v_add_u32_e32 v20, vcc, 0xffffd800, v2
; GFX8-NEXT: v_addc_u32_e32 v21, vcc, -1, v3, vcc
+; GFX8-NEXT: flat_load_dwordx2 v[6:7], v[6:7]
; GFX8-NEXT: v_add_u32_e32 v22, vcc, 0xffffe000, v2
; GFX8-NEXT: v_addc_u32_e32 v23, vcc, -1, v3, vcc
; GFX8-NEXT: flat_load_dwordx2 v[8:9], v[4:5]
@@ -514,10 +514,8 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX900-NEXT: ; => This Inner Loop Header: Depth=2
; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, 0xffffb000, v2
; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, -1, v3, vcc
-; GFX900-NEXT: global_load_dwordx2 v[10:11], v[2:3], off offset:-4096
-; GFX900-NEXT: global_load_dwordx2 v[12:13], v[2:3], off offset:-2048
-; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v2
; GFX900-NEXT: global_load_dwordx2 v[8:9], v[8:9], off
+; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v2
; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v3, vcc
; GFX900-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048
; GFX900-NEXT: global_load_dwordx2 v[20:21], v[14:15], off
@@ -526,13 +524,15 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, s3, v2
; GFX900-NEXT: global_load_dwordx2 v[16:17], v[16:17], off offset:-2048
; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v3, vcc
+; GFX900-NEXT: global_load_dwordx2 v[10:11], v[2:3], off offset:-4096
+; GFX900-NEXT: global_load_dwordx2 v[12:13], v[2:3], off offset:-2048
; GFX900-NEXT: s_addk_i32 s5, 0x2000
; GFX900-NEXT: s_cmp_gt_u32 s5, 0x3fffff
-; GFX900-NEXT: s_waitcnt vmcnt(3)
+; GFX900-NEXT: s_waitcnt vmcnt(5)
; GFX900-NEXT: v_add_co_u32_e32 v22, vcc, v8, v4
; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc
; GFX900-NEXT: global_load_dwordx2 v[8:9], v[14:15], off offset:-4096
-; GFX900-NEXT: s_waitcnt vmcnt(3)
+; GFX900-NEXT: s_waitcnt vmcnt(5)
; GFX900-NEXT: v_add_co_u32_e64 v24, s[0:1], v18, v22
; GFX900-NEXT: v_addc_co_u32_e64 v25, s[0:1], v19, v5, s[0:1]
; GFX900-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048
@@ -540,13 +540,13 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, s4, v2
; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, -1, v3, vcc
; GFX900-NEXT: global_load_dwordx2 v[4:5], v[4:5], off offset:-2048
-; GFX900-NEXT: s_waitcnt vmcnt(5)
+; GFX900-NEXT: s_waitcnt vmcnt(7)
; GFX900-NEXT: v_add_co_u32_e32 v20, vcc, v20, v24
; GFX900-NEXT: global_load_dwordx2 v[14:15], v[2:3], off
; GFX900-NEXT: v_addc_co_u32_e32 v21, vcc, v21, v25, vcc
; GFX900-NEXT: v_add_co_u32_e32 v2, vcc, 0x10000, v2
; GFX900-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
-; GFX900-NEXT: s_waitcnt vmcnt(5)
+; GFX900-NEXT: s_waitcnt vmcnt(7)
; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, v16, v20
; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, v17, v21, vcc
; GFX900-NEXT: s_waitcnt vmcnt(4)
@@ -734,10 +734,8 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_add_co_u32_e32 v12, vcc, 0xffffb000, v6
; GFX90A-NEXT: v_addc_co_u32_e32 v13, vcc, -1, v7, vcc
-; GFX90A-NEXT: global_load_dwordx2 v[8:9], v[6:7], off offset:-4096
-; GFX90A-NEXT: global_load_dwordx2 v[10:11], v[6:7], off offset:-2048
-; GFX90A-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v6
; GFX90A-NEXT: global_load_dwordx2 v[12:13], v[12:13], off
+; GFX90A-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v6
; GFX90A-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v7, vcc
; GFX90A-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048
; GFX90A-NEXT: global_load_dwordx2 v[20:21], v[14:15], off
@@ -753,39 +751,42 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX90A-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v7, vcc
; GFX90A-NEXT: global_load_dwordx2 v[14:15], v[22:23], off offset:-2048
; GFX90A-NEXT: global_load_dwordx2 v[30:31], v[6:7], off
+; GFX90A-NEXT: global_load_dwordx2 v[8:9], v[6:7], off offset:-4096
+; GFX90A-NEXT: global_load_dwordx2 v[10:11], v[6:7], off offset:-2048
; GFX90A-NEXT: v_add_co_u32_e32 v6, vcc, 0x10000, v6
; GFX90A-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v7, vcc
; GFX90A-NEXT: s_addk_i32 s3, 0x2000
; GFX90A-NEXT: s_cmp_gt_u32 s3, 0x3fffff
-; GFX90A-NEXT: s_waitcnt vmcnt(8)
+; GFX90A-NEXT: s_waitcnt vmcnt(10)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v5, vcc
-; GFX90A-NEXT: s_waitcnt vmcnt(7)
+; GFX90A-NEXT: s_waitcnt vmcnt(9)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v18, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v19, v5, vcc
-; GFX90A-NEXT: s_waitcnt vmcnt(6)
+; GFX90A-NEXT: s_waitcnt vmcnt(8)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v20, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v21, v5, vcc
-; GFX90A-NEXT: s_waitcnt vmcnt(5)
+; GFX90A-NEXT: s_waitcnt vmcnt(7)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v16, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v17, v5, vcc
-; GFX90A-NEXT: s_waitcnt vmcnt(4)
+; GFX90A-NEXT: s_waitcnt vmcnt(6)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v24, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v25, v5, vcc
-; GFX90A-NEXT: s_waitcnt vmcnt(3)
+; GFX90A-NEXT: s_waitcnt vmcnt(5)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v26, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v27, v5, vcc
-; GFX90A-NEXT: s_waitcnt vmcnt(2)
+; GFX90A-NEXT: s_waitcnt vmcnt(4)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v28, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v29, v5, vcc
-; GFX90A-NEXT: s_waitcnt vmcnt(1)
+; GFX90A-NEXT: s_waitcnt vmcnt(3)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc
+; GFX90A-NEXT: s_waitcnt vmcnt(1)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v8, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc
+; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v5, vcc
-; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v4
; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v31, v5, vcc
; GFX90A-NEXT: s_cbranch_scc0 .LBB1_2
diff --git a/llvm/test/CodeGen/AMDGPU/scheduler-rp-calc-one-successor-two-predecessors-bug.ll b/llvm/test/CodeGen/AMDGPU/scheduler-rp-calc-one-successor-two-predecessors-bug.ll
index 118c47e..cac1fe9 100644
--- a/llvm/test/CodeGen/AMDGPU/scheduler-rp-calc-one-successor-two-predecessors-bug.ll
+++ b/llvm/test/CodeGen/AMDGPU/scheduler-rp-calc-one-successor-two-predecessors-bug.ll
@@ -46,7 +46,7 @@ define amdgpu_ps void @_amdgpu_ps_main(float %arg) {
; GFX900-NEXT: s_mov_b64 exec, 0
; GFX900-NEXT: s_waitcnt vmcnt(0)
; GFX900-NEXT: v_mov_b32_e32 v1, 0
-; GFX900-NEXT: v_mov_b32_e32 v2, 0
+; GFX900-NEXT: v_mov_b32_e32 v2, v1
; GFX900-NEXT: .LBB0_5: ; %bb6
; GFX900-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX900-NEXT: s_waitcnt vmcnt(0)
@@ -75,7 +75,7 @@ bb5:
bb6:
%i7 = phi float [ 0.000000e+00, %bb5 ], [ %i3, %bb1 ]
%i8 = phi float [ 0.000000e+00, %bb5 ], [ 1.000000e+00, %bb1 ]
- %i9 = phi float [ undef, %bb5 ], [ %i4, %bb1 ]
+ %i9 = phi float [ poison, %bb5 ], [ %i4, %bb1 ]
%i10 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float 0.000000e+00, float %i7)
%i11 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %i8, float %i9)
call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 0, <2 x half> %i10, <2 x half> %i11, i1 false, i1 false)
diff --git a/llvm/test/CodeGen/AMDGPU/scratch-simple.ll b/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
index 7a3bff8..840916a 100644
--- a/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
+++ b/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
@@ -28,15 +28,20 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; SI-NEXT: s_mov_b32 s7, 0xe8f000
; SI-NEXT: s_add_u32 s4, s4, s0
; SI-NEXT: s_addc_u32 s5, s5, 0
+; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; SI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; SI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; SI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; SI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; SI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
@@ -44,26 +49,19 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; SI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; SI-NEXT: s_mov_b32 s0, 0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; SI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; SI-NEXT: s_waitcnt expcnt(0)
@@ -76,19 +74,8 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; SI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; SI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; SI-NEXT: s_mov_b32 s0, 0
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; SI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -97,8 +84,22 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x200, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; SI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; SI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
@@ -106,17 +107,16 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
-; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
+; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -153,37 +153,35 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; VI-NEXT: s_mov_b32 s7, 0xe80000
; VI-NEXT: s_add_u32 s4, s4, s0
; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; VI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; VI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; VI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; VI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; VI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:292
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; VI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; VI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; VI-NEXT: s_mov_b32 s0, 0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; VI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; VI-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -193,19 +191,8 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; VI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; VI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; VI-NEXT: s_mov_b32 s0, 0
-; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; VI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -213,24 +200,37 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x200, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; VI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; VI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:824
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; VI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
+; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
-; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
-; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -266,36 +266,33 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX9-MUBUF-NEXT: s_mov_b32 s3, 0xe00000
; GFX9-MUBUF-NEXT: s_add_u32 s0, s0, s4
; GFX9-MUBUF-NEXT: s_addc_u32 s1, s1, 0
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
-; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
-; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
; GFX9-MUBUF-NEXT: buffer_store_dword v6, off, s[0:3], 0 offset:304
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:300
; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:296
; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:292
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:288
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:256
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v8, 0xbe319356
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:252
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -305,26 +302,30 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
+; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
+; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
+; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:268
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:248
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:240
; GFX9-MUBUF-NEXT: buffer_store_dword v11, off, s[0:3], 0 offset:232
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:228
; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:224
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:216
-; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
-; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
-; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:196
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; GFX9-MUBUF-NEXT: v_add_u32_e32 v1, 0x200, v0
; GFX9-MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen
; GFX9-MUBUF-NEXT: s_nop 0
@@ -333,16 +334,15 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:824
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:820
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:816
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:812
; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:808
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:804
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:800
-; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:784
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:780
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
@@ -392,7 +392,6 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -435,6 +434,7 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -448,8 +448,6 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -457,6 +455,8 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -503,7 +503,6 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -546,6 +545,7 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -559,8 +559,6 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -568,6 +566,8 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -974,42 +974,43 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v23, v21 :: v_dual_mov_b32 v8, 0x3f3d349e
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v7, 0x3f523be1
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v24, 0xbf523be3
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v26, v17
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v12, 0xbe319356 :: v_dual_mov_b32 v31, v19
-; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v2, v8
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, 0x3f5f2ee2 :: v_dual_mov_b32 v3, v7
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v5, 0x3f638e37
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v15, 0x3e319356
; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v4, v6
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v24 :: v_dual_mov_b32 v4, v6
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[5:8], off offset:304
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[1:4], off offset:288
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v27, v24
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v1, v0
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v13, 0x3eae29dc :: v_dual_mov_b32 v34, v5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v3, 0xbefcd8a3
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v9, 0xb702e758 :: v_dual_mov_b32 v36, v6
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v25, 0x3f20e7f5
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v36, v6
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, 0xb7043519 :: v_dual_mov_b32 v29, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v18, 0xbf20e7f5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v14, 0x3eae29d8
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v16, 0x3e31934f
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, 0x3efcd89c
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v30, v13
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[0:3], off offset:272
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[9:12], off offset:256
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, 0x3f20e7f4
+; GFX11-FLATSCR-NEXT: s_clause 0x1
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v20, 0x3efcd89c
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, v18
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
-; GFX11-FLATSCR-NEXT: s_clause 0x3
+; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[13:16], off offset:240
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
; GFX11-FLATSCR-NEXT: scratch_load_b32 v14, v37, off
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, v0
@@ -1024,8 +1025,7 @@ define amdgpu_ps float @ps_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:784
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v31, 0xbf5f2ee2 :: v_dual_mov_b32 v32, v6
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v6, v13
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v19, v2
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, v13 :: v_dual_mov_b32 v19, v2
; GFX11-FLATSCR-NEXT: s_clause 0x4
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[33:36], off offset:768
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:752
@@ -1051,15 +1051,20 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; SI-NEXT: s_mov_b32 s7, 0xe8f000
; SI-NEXT: s_add_u32 s4, s4, s0
; SI-NEXT: s_addc_u32 s5, s5, 0
+; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; SI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; SI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; SI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; SI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; SI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
@@ -1067,26 +1072,19 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; SI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; SI-NEXT: s_mov_b32 s0, 0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; SI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; SI-NEXT: s_waitcnt expcnt(0)
@@ -1099,19 +1097,8 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; SI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; SI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; SI-NEXT: s_mov_b32 s0, 0
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; SI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -1120,8 +1107,22 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x200, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; SI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; SI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
@@ -1129,17 +1130,16 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
-; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
+; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -1176,37 +1176,35 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; VI-NEXT: s_mov_b32 s7, 0xe80000
; VI-NEXT: s_add_u32 s4, s4, s0
; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; VI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; VI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; VI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; VI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; VI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:292
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; VI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; VI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; VI-NEXT: s_mov_b32 s0, 0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; VI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; VI-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -1216,19 +1214,8 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; VI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; VI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; VI-NEXT: s_mov_b32 s0, 0
-; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; VI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -1236,24 +1223,37 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x200, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; VI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; VI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:824
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; VI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
+; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
-; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
-; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -1289,36 +1289,33 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: s_mov_b32 s3, 0xe00000
; GFX9-MUBUF-NEXT: s_add_u32 s0, s0, s4
; GFX9-MUBUF-NEXT: s_addc_u32 s1, s1, 0
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
-; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
-; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
; GFX9-MUBUF-NEXT: buffer_store_dword v6, off, s[0:3], 0 offset:304
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:300
; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:296
; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:292
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:288
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:256
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v8, 0xbe319356
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:252
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -1328,26 +1325,30 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
+; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
+; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
+; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:268
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:248
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:240
; GFX9-MUBUF-NEXT: buffer_store_dword v11, off, s[0:3], 0 offset:232
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:228
; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:224
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:216
-; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
-; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
-; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:196
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; GFX9-MUBUF-NEXT: v_add_u32_e32 v1, 0x200, v0
; GFX9-MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen
; GFX9-MUBUF-NEXT: s_nop 0
@@ -1356,16 +1357,15 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:824
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:820
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:816
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:812
; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:808
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:804
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:800
-; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:784
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:780
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
@@ -1415,7 +1415,6 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -1458,6 +1457,7 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -1471,8 +1471,6 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -1480,6 +1478,8 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -1526,7 +1526,6 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -1569,6 +1568,7 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -1582,8 +1582,6 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -1591,6 +1589,8 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -1997,42 +1997,43 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v23, v21 :: v_dual_mov_b32 v8, 0x3f3d349e
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v7, 0x3f523be1
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v24, 0xbf523be3
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v26, v17
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v12, 0xbe319356 :: v_dual_mov_b32 v31, v19
-; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v2, v8
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, 0x3f5f2ee2 :: v_dual_mov_b32 v3, v7
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v5, 0x3f638e37
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v15, 0x3e319356
; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v4, v6
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v24 :: v_dual_mov_b32 v4, v6
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[5:8], off offset:304
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[1:4], off offset:288
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v27, v24
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v1, v0
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v13, 0x3eae29dc :: v_dual_mov_b32 v34, v5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v3, 0xbefcd8a3
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v9, 0xb702e758 :: v_dual_mov_b32 v36, v6
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v25, 0x3f20e7f5
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v36, v6
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, 0xb7043519 :: v_dual_mov_b32 v29, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v18, 0xbf20e7f5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v14, 0x3eae29d8
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v16, 0x3e31934f
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, 0x3efcd89c
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v30, v13
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[0:3], off offset:272
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[9:12], off offset:256
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, 0x3f20e7f4
+; GFX11-FLATSCR-NEXT: s_clause 0x1
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v20, 0x3efcd89c
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, v18
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
-; GFX11-FLATSCR-NEXT: s_clause 0x3
+; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[13:16], off offset:240
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
; GFX11-FLATSCR-NEXT: scratch_load_b32 v14, v37, off
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, v0
@@ -2047,8 +2048,7 @@ define amdgpu_vs float @vs_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:784
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v31, 0xbf5f2ee2 :: v_dual_mov_b32 v32, v6
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v6, v13
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v19, v2
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, v13 :: v_dual_mov_b32 v19, v2
; GFX11-FLATSCR-NEXT: s_clause 0x4
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[33:36], off offset:768
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:752
@@ -2074,15 +2074,20 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; SI-NEXT: s_mov_b32 s7, 0xe8f000
; SI-NEXT: s_add_u32 s4, s4, s0
; SI-NEXT: s_addc_u32 s5, s5, 0
+; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; SI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; SI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; SI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; SI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; SI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
@@ -2090,26 +2095,19 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; SI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; SI-NEXT: s_mov_b32 s0, 0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; SI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; SI-NEXT: s_waitcnt expcnt(0)
@@ -2122,19 +2120,8 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; SI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; SI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; SI-NEXT: s_mov_b32 s0, 0
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; SI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -2143,8 +2130,22 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x200, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; SI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; SI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
@@ -2152,17 +2153,16 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
-; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
+; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -2199,37 +2199,35 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; VI-NEXT: s_mov_b32 s7, 0xe80000
; VI-NEXT: s_add_u32 s4, s4, s0
; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; VI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; VI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; VI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; VI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; VI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:292
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; VI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; VI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; VI-NEXT: s_mov_b32 s0, 0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; VI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; VI-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -2239,19 +2237,8 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; VI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; VI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; VI-NEXT: s_mov_b32 s0, 0
-; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; VI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -2259,24 +2246,37 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x200, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; VI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; VI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:824
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; VI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
+; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
-; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
-; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -2312,36 +2312,33 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: s_mov_b32 s3, 0xe00000
; GFX9-MUBUF-NEXT: s_add_u32 s0, s0, s4
; GFX9-MUBUF-NEXT: s_addc_u32 s1, s1, 0
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
-; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
-; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
; GFX9-MUBUF-NEXT: buffer_store_dword v6, off, s[0:3], 0 offset:304
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:300
; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:296
; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:292
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:288
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:256
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v8, 0xbe319356
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:252
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -2351,26 +2348,30 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
+; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
+; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
+; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:268
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:248
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:240
; GFX9-MUBUF-NEXT: buffer_store_dword v11, off, s[0:3], 0 offset:232
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:228
; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:224
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:216
-; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
-; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
-; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:196
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; GFX9-MUBUF-NEXT: v_add_u32_e32 v1, 0x200, v0
; GFX9-MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen
; GFX9-MUBUF-NEXT: s_nop 0
@@ -2379,16 +2380,15 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:824
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:820
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:816
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:812
; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:808
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:804
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:800
-; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:784
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:780
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
@@ -2438,7 +2438,6 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -2481,6 +2480,7 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -2494,8 +2494,6 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -2503,6 +2501,8 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -2549,7 +2549,6 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -2592,6 +2591,7 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -2605,8 +2605,6 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -2614,6 +2612,8 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -3020,42 +3020,43 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v23, v21 :: v_dual_mov_b32 v8, 0x3f3d349e
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v7, 0x3f523be1
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v24, 0xbf523be3
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v26, v17
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v12, 0xbe319356 :: v_dual_mov_b32 v31, v19
-; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v2, v8
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, 0x3f5f2ee2 :: v_dual_mov_b32 v3, v7
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v5, 0x3f638e37
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v15, 0x3e319356
; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v4, v6
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v24 :: v_dual_mov_b32 v4, v6
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[5:8], off offset:304
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[1:4], off offset:288
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v27, v24
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v1, v0
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v13, 0x3eae29dc :: v_dual_mov_b32 v34, v5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v3, 0xbefcd8a3
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v9, 0xb702e758 :: v_dual_mov_b32 v36, v6
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v25, 0x3f20e7f5
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v36, v6
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, 0xb7043519 :: v_dual_mov_b32 v29, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v18, 0xbf20e7f5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v14, 0x3eae29d8
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v16, 0x3e31934f
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, 0x3efcd89c
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v30, v13
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[0:3], off offset:272
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[9:12], off offset:256
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, 0x3f20e7f4
+; GFX11-FLATSCR-NEXT: s_clause 0x1
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v20, 0x3efcd89c
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, v18
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
-; GFX11-FLATSCR-NEXT: s_clause 0x3
+; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[13:16], off offset:240
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
; GFX11-FLATSCR-NEXT: scratch_load_b32 v14, v37, off
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, v0
@@ -3070,8 +3071,7 @@ define amdgpu_cs float @cs_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:784
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v31, 0xbf5f2ee2 :: v_dual_mov_b32 v32, v6
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v6, v13
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v19, v2
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, v13 :: v_dual_mov_b32 v19, v2
; GFX11-FLATSCR-NEXT: s_clause 0x4
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[33:36], off offset:768
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:752
@@ -3097,15 +3097,20 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; SI-NEXT: s_mov_b32 s7, 0xe8f000
; SI-NEXT: s_add_u32 s4, s4, s0
; SI-NEXT: s_addc_u32 s5, s5, 0
+; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; SI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; SI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; SI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; SI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; SI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
@@ -3113,26 +3118,19 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; SI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; SI-NEXT: s_mov_b32 s0, 0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; SI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; SI-NEXT: s_waitcnt expcnt(0)
@@ -3145,19 +3143,8 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; SI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; SI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; SI-NEXT: s_mov_b32 s0, 0
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; SI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -3166,8 +3153,22 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x200, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; SI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; SI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
@@ -3175,17 +3176,16 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
-; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
+; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -3222,37 +3222,35 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; VI-NEXT: s_mov_b32 s7, 0xe80000
; VI-NEXT: s_add_u32 s4, s4, s0
; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; VI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; VI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; VI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; VI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; VI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:292
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; VI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; VI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; VI-NEXT: s_mov_b32 s0, 0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; VI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; VI-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -3262,19 +3260,8 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; VI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; VI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; VI-NEXT: s_mov_b32 s0, 0
-; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; VI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -3282,24 +3269,37 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x200, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; VI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; VI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:824
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; VI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
+; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
-; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
-; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -3334,36 +3334,33 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: s_mov_b32 s3, 0xe00000
; GFX9-MUBUF-NEXT: s_add_u32 s0, s0, s5
; GFX9-MUBUF-NEXT: s_addc_u32 s1, s1, 0
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
-; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
-; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
; GFX9-MUBUF-NEXT: buffer_store_dword v6, off, s[0:3], 0 offset:304
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:300
; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:296
; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:292
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:288
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:256
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v8, 0xbe319356
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:252
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -3373,26 +3370,30 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
+; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
+; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
+; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:268
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:248
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:240
; GFX9-MUBUF-NEXT: buffer_store_dword v11, off, s[0:3], 0 offset:232
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:228
; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:224
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:216
-; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
-; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
-; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:196
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; GFX9-MUBUF-NEXT: v_add_u32_e32 v1, 0x200, v0
; GFX9-MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen
; GFX9-MUBUF-NEXT: s_nop 0
@@ -3401,16 +3402,15 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:824
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:820
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:816
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:812
; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:808
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:804
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:800
-; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:784
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:780
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
@@ -3459,7 +3459,6 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -3502,6 +3501,7 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -3515,8 +3515,6 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -3524,6 +3522,8 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -3569,7 +3569,6 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -3612,6 +3611,7 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -3625,8 +3625,6 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -3634,6 +3632,8 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -4040,42 +4040,43 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v23, v21 :: v_dual_mov_b32 v8, 0x3f3d349e
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v7, 0x3f523be1
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v24, 0xbf523be3
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v26, v17
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v12, 0xbe319356 :: v_dual_mov_b32 v31, v19
-; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v2, v8
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, 0x3f5f2ee2 :: v_dual_mov_b32 v3, v7
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v5, 0x3f638e37
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v15, 0x3e319356
; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v4, v6
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v24 :: v_dual_mov_b32 v4, v6
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[5:8], off offset:304
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[1:4], off offset:288
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v27, v24
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v1, v0
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v13, 0x3eae29dc :: v_dual_mov_b32 v34, v5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v3, 0xbefcd8a3
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v9, 0xb702e758 :: v_dual_mov_b32 v36, v6
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v25, 0x3f20e7f5
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v36, v6
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, 0xb7043519 :: v_dual_mov_b32 v29, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v18, 0xbf20e7f5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v14, 0x3eae29d8
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v16, 0x3e31934f
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, 0x3efcd89c
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v30, v13
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[0:3], off offset:272
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[9:12], off offset:256
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, 0x3f20e7f4
+; GFX11-FLATSCR-NEXT: s_clause 0x1
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v20, 0x3efcd89c
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, v18
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
-; GFX11-FLATSCR-NEXT: s_clause 0x3
+; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[13:16], off offset:240
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
; GFX11-FLATSCR-NEXT: scratch_load_b32 v14, v37, off
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, v0
@@ -4090,8 +4091,7 @@ define amdgpu_hs float @hs_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:784
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v31, 0xbf5f2ee2 :: v_dual_mov_b32 v32, v6
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v6, v13
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v19, v2
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, v13 :: v_dual_mov_b32 v19, v2
; GFX11-FLATSCR-NEXT: s_clause 0x4
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[33:36], off offset:768
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:752
@@ -4117,15 +4117,20 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; SI-NEXT: s_mov_b32 s7, 0xe8f000
; SI-NEXT: s_add_u32 s4, s4, s0
; SI-NEXT: s_addc_u32 s5, s5, 0
+; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; SI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; SI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; SI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; SI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; SI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
@@ -4133,26 +4138,19 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; SI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; SI-NEXT: s_mov_b32 s0, 0
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; SI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; SI-NEXT: s_waitcnt expcnt(0)
@@ -4165,19 +4163,8 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; SI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; SI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; SI-NEXT: s_mov_b32 s0, 0
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; SI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -4186,8 +4173,22 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; SI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x200, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; SI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; SI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; SI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; SI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; SI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
@@ -4195,17 +4196,16 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; SI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
-; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
-; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
-; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
-; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
-; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
; SI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
; SI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
+; SI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
+; SI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
+; SI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
+; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
+; SI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; SI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; SI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -4242,37 +4242,35 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; VI-NEXT: s_mov_b32 s7, 0xe80000
; VI-NEXT: s_add_u32 s4, s4, s0
; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
+; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; VI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
+; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
; VI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; VI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; VI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:320
-; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
-; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
; VI-NEXT: buffer_store_dword v6, off, s[4:7], 0 offset:304
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:300
; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:296
; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:292
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:288
; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
-; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
-; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:264
-; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:260
-; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:256
; VI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; VI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; VI-NEXT: s_mov_b32 s0, 0
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:284
+; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; VI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:252
; VI-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -4282,19 +4280,8 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; VI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; VI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; VI-NEXT: s_mov_b32 s0, 0
-; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
-; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
-; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
-; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
-; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:220
-; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
+; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; VI-NEXT: buffer_store_dword v16, off, s[4:7], 0 offset:212
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:208
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:204
@@ -4302,24 +4289,37 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; VI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x200, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: buffer_store_dword v3, off, s[4:7], 0 offset:316
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:312
+; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:308
+; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:280
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:276
+; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:272
+; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:268
+; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:248
+; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:240
+; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:232
+; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:228
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:224
+; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:216
; VI-NEXT: buffer_store_dword v14, off, s[4:7], 0 offset:196
; VI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; VI-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:832
; VI-NEXT: buffer_store_dword v11, off, s[4:7], 0 offset:828
; VI-NEXT: buffer_store_dword v10, off, s[4:7], 0 offset:824
; VI-NEXT: buffer_store_dword v9, off, s[4:7], 0 offset:820
; VI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
+; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
+; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
+; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v17, off, s[4:7], 0 offset:816
; VI-NEXT: buffer_store_dword v8, off, s[4:7], 0 offset:812
; VI-NEXT: buffer_store_dword v7, off, s[4:7], 0 offset:808
; VI-NEXT: buffer_store_dword v2, off, s[4:7], 0 offset:804
; VI-NEXT: buffer_store_dword v12, off, s[4:7], 0 offset:800
-; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:796
-; VI-NEXT: buffer_store_dword v13, off, s[4:7], 0 offset:792
-; VI-NEXT: buffer_store_dword v4, off, s[4:7], 0 offset:788
-; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v18, off, s[4:7], 0 offset:784
; VI-NEXT: buffer_store_dword v5, off, s[4:7], 0 offset:780
; VI-NEXT: buffer_store_dword v15, off, s[4:7], 0 offset:776
@@ -4354,36 +4354,33 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: s_mov_b32 s3, 0xe00000
; GFX9-MUBUF-NEXT: s_add_u32 s0, s0, s5
; GFX9-MUBUF-NEXT: s_addc_u32 s1, s1, 0
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:320
-; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
-; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
; GFX9-MUBUF-NEXT: buffer_store_dword v6, off, s[0:3], 0 offset:304
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:300
; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:296
; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:292
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:288
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:264
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:260
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:256
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:284
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v8, 0xbe319356
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:252
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -4393,26 +4390,30 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
+; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
+; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:316
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:312
+; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:308
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:280
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:276
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:272
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:268
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:248
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:240
; GFX9-MUBUF-NEXT: buffer_store_dword v11, off, s[0:3], 0 offset:232
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:228
; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:224
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:220
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:216
-; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:212
-; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:200
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
-; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:196
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; GFX9-MUBUF-NEXT: v_add_u32_e32 v1, 0x200, v0
; GFX9-MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen
; GFX9-MUBUF-NEXT: s_nop 0
@@ -4421,16 +4422,15 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:824
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:820
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:816
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:812
; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:808
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:804
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:800
-; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:796
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[0:3], 0 offset:792
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:788
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[0:3], 0 offset:784
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:780
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
@@ -4479,7 +4479,6 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -4522,6 +4521,7 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -4535,8 +4535,6 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -4544,6 +4542,8 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -4589,7 +4589,6 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:308
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:304
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:300
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:292
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:288
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v7, off, s[0:3], 0 offset:284
@@ -4632,6 +4631,7 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:208
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:204
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[0:3], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:200
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v17, off, s[0:3], 0 offset:196
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -4645,8 +4645,6 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], 0 offset:812
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v10, off, s[0:3], 0 offset:800
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[0:3], 0 offset:796
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[0:3], 0 offset:792
@@ -4654,6 +4652,8 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v19, off, s[0:3], 0 offset:784
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:780
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[0:3], 0 offset:808
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[0:3], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[0:3], 0 offset:776
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[0:3], 0 offset:772
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v16, off, s[0:3], 0 offset:768
@@ -5060,42 +5060,43 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v23, v21 :: v_dual_mov_b32 v8, 0x3f3d349e
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v7, 0x3f523be1
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v24, 0xbf523be3
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v26, v17
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v12, 0xbe319356 :: v_dual_mov_b32 v31, v19
-; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v2, v8
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, 0x3f5f2ee2 :: v_dual_mov_b32 v3, v7
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v5, 0x3f638e37
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v15, 0x3e319356
; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v4, v6
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v24 :: v_dual_mov_b32 v4, v6
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[5:8], off offset:304
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[1:4], off offset:288
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v27, v24
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, 0xbefcd89f :: v_dual_mov_b32 v1, v0
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v13, 0x3eae29dc :: v_dual_mov_b32 v34, v5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v3, 0xbefcd8a3
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v9, 0xb702e758 :: v_dual_mov_b32 v36, v6
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v25, 0x3f20e7f5
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v11, 0xbe31934f :: v_dual_mov_b32 v36, v6
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, 0xb7043519 :: v_dual_mov_b32 v29, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v18, 0xbf20e7f5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v14, 0x3eae29d8
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v16, 0x3e31934f
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, 0x3efcd89c
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v30, v13
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[0:3], off offset:272
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[9:12], off offset:256
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, 0x3f20e7f4
+; GFX11-FLATSCR-NEXT: s_clause 0x1
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v20, 0x3efcd89c
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, v18
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
-; GFX11-FLATSCR-NEXT: s_clause 0x3
+; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[13:16], off offset:240
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
; GFX11-FLATSCR-NEXT: scratch_load_b32 v14, v37, off
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, v0
@@ -5110,8 +5111,7 @@ define amdgpu_gs float @gs_main(i32 %idx) {
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:784
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v31, 0xbf5f2ee2 :: v_dual_mov_b32 v32, v6
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v6, v13
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v19, v2
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, v13 :: v_dual_mov_b32 v19, v2
; GFX11-FLATSCR-NEXT: s_clause 0x4
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[33:36], off offset:768
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:752
@@ -5141,15 +5141,20 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: s_mov_b32 s11, 0xe8f000
; SI-NEXT: s_add_u32 s8, s8, s6
; SI-NEXT: s_addc_u32 s9, s9, 0
+; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; SI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
; SI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; SI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; SI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
-; SI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
-; SI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
-; SI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
; SI-NEXT: buffer_store_dword v6, off, s[8:11], 0 offset:304
; SI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:300
; SI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:296
@@ -5157,26 +5162,19 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:288
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
-; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
-; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
-; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:256
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; SI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; SI-NEXT: s_mov_b32 s0, 0
+; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; SI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:252
; SI-NEXT: s_waitcnt expcnt(0)
@@ -5189,19 +5187,8 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; SI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; SI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; SI-NEXT: s_mov_b32 s0, 0
-; SI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
-; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
-; SI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
-; SI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
-; SI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
-; SI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
; SI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:220
-; SI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
+; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; SI-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:212
; SI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
; SI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
@@ -5210,8 +5197,22 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x200, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
+; SI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
+; SI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
+; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
+; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
+; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
+; SI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
+; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
+; SI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
+; SI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
+; SI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
+; SI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
+; SI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
; SI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:196
; SI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; SI-NEXT: buffer_load_dword v0, v0, s[8:11], 0 offen
; SI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:832
; SI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:828
@@ -5219,17 +5220,16 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:820
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
-; SI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:816
-; SI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:812
-; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:808
-; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:804
-; SI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
; SI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
; SI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
+; SI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:816
+; SI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:812
+; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:808
+; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:804
+; SI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:784
; SI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:780
; SI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
@@ -5267,37 +5267,35 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; VI-NEXT: s_mov_b32 s11, 0xe80000
; VI-NEXT: s_add_u32 s8, s8, s6
; VI-NEXT: s_addc_u32 s9, s9, 0
+; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
+; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; VI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
+; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
; VI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; VI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; VI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
-; VI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
-; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
-; VI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
; VI-NEXT: buffer_store_dword v6, off, s[8:11], 0 offset:304
; VI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:300
; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:296
; VI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:292
; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:288
; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
-; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
-; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
-; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
-; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
-; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
-; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:256
; VI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; VI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; VI-NEXT: s_mov_b32 s0, 0
+; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
+; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; VI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:252
; VI-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -5307,19 +5305,8 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; VI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; VI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; VI-NEXT: s_mov_b32 s0, 0
-; VI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
-; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
-; VI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
-; VI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
-; VI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
-; VI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
; VI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:220
-; VI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
+; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; VI-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:212
; VI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
; VI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
@@ -5327,24 +5314,37 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; VI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x200, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
+; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
+; VI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
+; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
+; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
+; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
+; VI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
+; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
+; VI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
+; VI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
+; VI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
+; VI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
+; VI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
; VI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:196
; VI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; VI-NEXT: buffer_load_dword v0, v0, s[8:11], 0 offen
; VI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:832
; VI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:828
; VI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:824
; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:820
; VI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; VI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
+; VI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
+; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
+; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:816
; VI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:812
; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:808
; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:804
; VI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:800
-; VI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
-; VI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
-; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
-; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:784
; VI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:780
; VI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
@@ -5380,36 +5380,33 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX9-MUBUF-NEXT: s_mov_b32 s11, 0xe00000
; GFX9-MUBUF-NEXT: s_add_u32 s8, s8, s5
; GFX9-MUBUF-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
-; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
-; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
; GFX9-MUBUF-NEXT: buffer_store_dword v6, off, s[8:11], 0 offset:304
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:300
; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:296
; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:292
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:288
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:256
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v8, 0xbe319356
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:252
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -5419,26 +5416,30 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:220
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:212
+; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:200
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
+; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
+; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
; GFX9-MUBUF-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:220
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
-; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:212
-; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:200
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
-; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:196
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; GFX9-MUBUF-NEXT: v_add_u32_e32 v1, 0x200, v0
; GFX9-MUBUF-NEXT: buffer_load_dword v0, v0, s[8:11], 0 offen
; GFX9-MUBUF-NEXT: s_nop 0
@@ -5447,16 +5448,15 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:824
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:820
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:816
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:812
; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:808
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:804
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:800
-; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:784
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:780
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
@@ -5491,10 +5491,10 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: s_mov_b32 s10, -1
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v1, 0xbf20e7f4
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v2, 0x3f3d349e
-; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f5f2ee2
; GFX10_W32-MUBUF-NEXT: s_mov_b32 s11, 0x31c16000
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f523be1
; GFX10_W32-MUBUF-NEXT: s_add_u32 s8, s8, s5
+; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f5f2ee2
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbefcd8a3
; GFX10_W32-MUBUF-NEXT: s_addc_u32 s9, s9, 0
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f638e37
@@ -5505,8 +5505,6 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:312
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:308
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:304
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:300
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:292
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:288
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:284
@@ -5549,6 +5547,8 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:300
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:200
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:196
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -5562,8 +5562,6 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:812
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:808
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:800
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:796
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:792
@@ -5571,6 +5569,8 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v19, off, s[8:11], 0 offset:784
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:780
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:808
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:772
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:768
@@ -5602,10 +5602,10 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: s_mov_b32 s10, -1
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v1, 0xbf20e7f4
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v2, 0x3f3d349e
-; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f5f2ee2
; GFX10_W64-MUBUF-NEXT: s_mov_b32 s11, 0x31e16000
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f523be1
; GFX10_W64-MUBUF-NEXT: s_add_u32 s8, s8, s5
+; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f5f2ee2
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbefcd8a3
; GFX10_W64-MUBUF-NEXT: s_addc_u32 s9, s9, 0
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f638e37
@@ -5616,8 +5616,6 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:312
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:308
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:304
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:300
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:292
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:288
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:284
@@ -5660,6 +5658,8 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:300
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:200
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:196
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -5673,8 +5673,6 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:812
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:808
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:800
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:796
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:792
@@ -5682,6 +5680,8 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v19, off, s[8:11], 0 offset:784
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:780
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:808
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:772
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:768
@@ -6093,10 +6093,10 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v0, 0xbeae29dc :: v_dual_mov_b32 v23, v21
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v6, 0x3f5f2ee2
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v5, 0x3f638e37
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v24, 0xbf523be3
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[1:4], off offset:320
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v7
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v4, v6
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v24, 0xbf523be3
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[5:8], off offset:304
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[1:4], off offset:288
@@ -6105,29 +6105,31 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v13, 0x3eae29dc :: v_dual_mov_b32 v34, v5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v3, 0xbefcd8a3
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v15, 0x3e319356 :: v_dual_mov_b32 v36, v6
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, 0xb702e758
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v10, 0xb7043519
+; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, 0xb7043519 :: v_dual_mov_b32 v29, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v11, 0xbe31934f
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v12, 0xbe319356 :: v_dual_mov_b32 v31, v19
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v18, 0xbf20e7f5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v14, 0x3eae29d8
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v16, 0x3e31934f
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v20, 0x3efcd89c :: v_dual_mov_b32 v29, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v30, v13
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[0:3], off offset:272
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[9:12], off offset:256
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, 0x3f20e7f4
+; GFX11-FLATSCR-NEXT: s_clause 0x1
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v20, 0x3efcd89c
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, v18
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
-; GFX11-FLATSCR-NEXT: s_clause 0x3
+; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[13:16], off offset:240
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
; GFX11-FLATSCR-NEXT: scratch_load_b32 v14, v37, off
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, v0
@@ -6142,8 +6144,7 @@ define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg,
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:784
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v31, 0xbf5f2ee2 :: v_dual_mov_b32 v32, v6
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v6, v13
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v19, v2
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, v13 :: v_dual_mov_b32 v19, v2
; GFX11-FLATSCR-NEXT: s_clause 0x4
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[33:36], off offset:768
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:752
@@ -6172,15 +6173,20 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: s_mov_b32 s11, 0xe8f000
; SI-NEXT: s_add_u32 s8, s8, s6
; SI-NEXT: s_addc_u32 s9, s9, 0
+; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; SI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
; SI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; SI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; SI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; SI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
-; SI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
-; SI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
-; SI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
; SI-NEXT: buffer_store_dword v6, off, s[8:11], 0 offset:304
; SI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:300
; SI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:296
@@ -6188,26 +6194,19 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:288
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; SI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
-; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
-; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
-; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:256
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; SI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; SI-NEXT: s_mov_b32 s0, 0
+; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; SI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; SI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:252
; SI-NEXT: s_waitcnt expcnt(0)
@@ -6220,19 +6219,8 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; SI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; SI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; SI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; SI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; SI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; SI-NEXT: s_mov_b32 s0, 0
-; SI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
-; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
-; SI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
-; SI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
-; SI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
-; SI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
; SI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:220
-; SI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
+; SI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; SI-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:212
; SI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
; SI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
@@ -6241,8 +6229,22 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x200, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
+; SI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
+; SI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
+; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
+; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
+; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
+; SI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
+; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
+; SI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
+; SI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
+; SI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
+; SI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
+; SI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
; SI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:196
; SI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; SI-NEXT: buffer_load_dword v0, v0, s[8:11], 0 offen
; SI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:832
; SI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:828
@@ -6250,17 +6252,16 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; SI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:820
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; SI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
-; SI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:816
-; SI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:812
-; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:808
-; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:804
-; SI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
; SI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
; SI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
+; SI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:816
+; SI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:812
+; SI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:808
+; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:804
+; SI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:800
; SI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:784
; SI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:780
; SI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
@@ -6298,37 +6299,35 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; VI-NEXT: s_mov_b32 s11, 0xe80000
; VI-NEXT: s_add_u32 s8, s8, s6
; VI-NEXT: s_addc_u32 s9, s9, 0
+; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
+; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
; VI-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
+; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
; VI-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; VI-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; VI-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; VI-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
-; VI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
-; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
-; VI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
; VI-NEXT: buffer_store_dword v6, off, s[8:11], 0 offset:304
; VI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:300
; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:296
; VI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:292
; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:288
; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
-; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; VI-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
-; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
-; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
-; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
-; VI-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
-; VI-NEXT: v_mov_b32_e32 v9, 0xb702e758
; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:256
; VI-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; VI-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
-; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; VI-NEXT: s_mov_b32 s0, 0
+; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
+; VI-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; VI-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; VI-NEXT: v_mov_b32_e32 v8, 0xbe319356
; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:252
; VI-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -6338,19 +6337,8 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; VI-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; VI-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; VI-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; VI-NEXT: v_mov_b32_e32 v14, 0xbf523be3
-; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; VI-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; VI-NEXT: v_and_b32_e32 v0, 0x1fc, v0
-; VI-NEXT: s_mov_b32 s0, 0
-; VI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
-; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
-; VI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
-; VI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
-; VI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
-; VI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
; VI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:220
-; VI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
+; VI-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
; VI-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:212
; VI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
; VI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
@@ -6358,24 +6346,37 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; VI-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x200, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
+; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
+; VI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
+; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
+; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
+; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
+; VI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
+; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
+; VI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
+; VI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
+; VI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
+; VI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
+; VI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
; VI-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:196
; VI-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; VI-NEXT: buffer_load_dword v0, v0, s[8:11], 0 offen
; VI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:832
; VI-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:828
; VI-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:824
; VI-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:820
; VI-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; VI-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; VI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
+; VI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
+; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
+; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:816
; VI-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:812
; VI-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:808
; VI-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:804
; VI-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:800
-; VI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
-; VI-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
-; VI-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
-; VI-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; VI-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:784
; VI-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:780
; VI-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
@@ -6411,36 +6412,33 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX9-MUBUF-NEXT: s_mov_b32 s11, 0xe00000
; GFX9-MUBUF-NEXT: s_add_u32 s8, s8, s5
; GFX9-MUBUF-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbf20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
+; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f3d349e
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f523be1
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f5f2ee2
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v6, 0x3f638e37
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:320
-; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
-; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
; GFX9-MUBUF-NEXT: buffer_store_dword v6, off, s[8:11], 0 offset:304
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:300
; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:296
; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:292
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:288
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd8a3
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xbe31934f
-; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
-; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:264
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb7043519
-; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:260
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0xb702e758
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:256
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e31934f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v10, 0x3eae29d8
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89c
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
+; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:284
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v2, 0xbefcd89f
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbeae29dc
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v8, 0xbe319356
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:252
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v9, 0x3e319356
@@ -6450,26 +6448,30 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v11, 0x3efcd89f
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v12, 0xbf20e7f5
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v13, 0xbf3d349e
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf523be3
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:220
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v15, 0xbf5f2ee3
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v16, 0xbf638e39
-; GFX9-MUBUF-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:212
+; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
+; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:200
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
+; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
+; GFX9-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:316
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:312
+; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:308
+; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:280
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:276
+; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:272
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:268
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:248
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:240
; GFX9-MUBUF-NEXT: buffer_store_dword v11, off, s[8:11], 0 offset:232
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:228
; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:224
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:220
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:216
-; GFX9-MUBUF-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:212
-; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
-; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:200
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f5
-; GFX9-MUBUF-NEXT: v_and_b32_e32 v0, 0x1fc, v0
; GFX9-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:196
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3f20e7f4
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
; GFX9-MUBUF-NEXT: v_add_u32_e32 v1, 0x200, v0
; GFX9-MUBUF-NEXT: buffer_load_dword v0, v0, s[8:11], 0 offen
; GFX9-MUBUF-NEXT: s_nop 0
@@ -6478,16 +6480,15 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX9-MUBUF-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:824
; GFX9-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:820
; GFX9-MUBUF-NEXT: v_mov_b32_e32 v17, 0x3703c499
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3f3d349c
+; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
+; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
+; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
+; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:816
; GFX9-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:812
; GFX9-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:808
; GFX9-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:804
; GFX9-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:800
-; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:796
-; GFX9-MUBUF-NEXT: buffer_store_dword v13, off, s[8:11], 0 offset:792
-; GFX9-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:788
-; GFX9-MUBUF-NEXT: v_mov_b32_e32 v18, 0xbf523be1
; GFX9-MUBUF-NEXT: buffer_store_dword v18, off, s[8:11], 0 offset:784
; GFX9-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:780
; GFX9-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
@@ -6522,10 +6523,10 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: s_mov_b32 s10, -1
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v1, 0xbf20e7f4
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v2, 0x3f3d349e
-; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f5f2ee2
; GFX10_W32-MUBUF-NEXT: s_mov_b32 s11, 0x31c16000
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f523be1
; GFX10_W32-MUBUF-NEXT: s_add_u32 s8, s8, s5
+; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f5f2ee2
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbefcd8a3
; GFX10_W32-MUBUF-NEXT: s_addc_u32 s9, s9, 0
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f638e37
@@ -6536,8 +6537,6 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:312
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:308
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:304
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:300
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:292
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:288
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:284
@@ -6580,6 +6579,8 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:300
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:296
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:200
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:196
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -6593,8 +6594,6 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:812
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:808
-; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:800
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:796
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:792
@@ -6602,6 +6601,8 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v19, off, s[8:11], 0 offset:784
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:780
; GFX10_W32-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:808
+; GFX10_W32-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:804
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:772
; GFX10_W32-MUBUF-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:768
@@ -6633,10 +6634,10 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: s_mov_b32 s10, -1
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v1, 0xbf20e7f4
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v2, 0x3f3d349e
-; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f5f2ee2
; GFX10_W64-MUBUF-NEXT: s_mov_b32 s11, 0x31e16000
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v3, 0x3f523be1
; GFX10_W64-MUBUF-NEXT: s_add_u32 s8, s8, s5
+; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v4, 0x3f5f2ee2
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v7, 0xbefcd8a3
; GFX10_W64-MUBUF-NEXT: s_addc_u32 s9, s9, 0
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v5, 0x3f638e37
@@ -6647,8 +6648,6 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:312
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:308
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:304
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:300
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v2, off, s[8:11], 0 offset:292
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:288
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v7, off, s[8:11], 0 offset:284
@@ -6691,6 +6690,8 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:208
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:204
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f20e7f4
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:300
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v3, off, s[8:11], 0 offset:296
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:200
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v17, off, s[8:11], 0 offset:196
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v18, 0x3703c499
@@ -6704,8 +6705,6 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v1, off, s[8:11], 0 offset:812
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0x3f3d349c
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v19, 0xbf523be1
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:808
-; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v10, off, s[8:11], 0 offset:800
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v14, off, s[8:11], 0 offset:796
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v12, off, s[8:11], 0 offset:792
@@ -6713,6 +6712,8 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v19, off, s[8:11], 0 offset:784
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v4, off, s[8:11], 0 offset:780
; GFX10_W64-MUBUF-NEXT: v_mov_b32_e32 v14, 0xbf5f2ee2
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v9, off, s[8:11], 0 offset:808
+; GFX10_W64-MUBUF-NEXT: buffer_store_dword v8, off, s[8:11], 0 offset:804
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v15, off, s[8:11], 0 offset:776
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v5, off, s[8:11], 0 offset:772
; GFX10_W64-MUBUF-NEXT: buffer_store_dword v16, off, s[8:11], 0 offset:768
@@ -7124,10 +7125,10 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v0, 0xbeae29dc :: v_dual_mov_b32 v23, v21
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v6, 0x3f5f2ee2
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v5, 0x3f638e37
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v24, 0xbf523be3
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[1:4], off offset:320
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v7
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v4, v6
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v24, 0xbf523be3
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[5:8], off offset:304
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[1:4], off offset:288
@@ -7136,29 +7137,31 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v13, 0x3eae29dc :: v_dual_mov_b32 v34, v5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v3, 0xbefcd8a3
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v15, 0x3e319356 :: v_dual_mov_b32 v36, v6
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, 0xb702e758
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v10, 0xb7043519
+; GFX11-FLATSCR-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, 0xb7043519 :: v_dual_mov_b32 v29, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v11, 0xbe31934f
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v12, 0xbe319356 :: v_dual_mov_b32 v31, v19
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v18, 0xbf20e7f5
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v14, 0x3eae29d8
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v16, 0x3e31934f
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v22, 0xbf638e39
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v25, 0x3f20e7f5 :: v_dual_mov_b32 v26, v17
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v20, 0x3efcd89c :: v_dual_mov_b32 v29, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v30, v13
; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[0:3], off offset:272
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[9:12], off offset:256
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v1, 0x3f20e7f4
+; GFX11-FLATSCR-NEXT: s_clause 0x1
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
+; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v20, 0x3efcd89c
+; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v9, v18
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v10, v2 :: v_dual_mov_b32 v11, v0
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v33, v22
-; GFX11-FLATSCR-NEXT: s_clause 0x3
+; GFX11-FLATSCR-NEXT: s_clause 0x1
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[13:16], off offset:240
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[17:20], off offset:224
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[21:24], off offset:208
-; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[24:27], off offset:192
; GFX11-FLATSCR-NEXT: scratch_load_b32 v14, v37, off
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v32, 0x3f3d349c :: v_dual_mov_b32 v5, v15
; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v20, v0
@@ -7173,8 +7176,7 @@ define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg,
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v24, v19 :: v_dual_mov_b32 v35, v21
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:784
; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v31, 0xbf5f2ee2 :: v_dual_mov_b32 v32, v6
-; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v27, v8 :: v_dual_mov_b32 v6, v13
-; GFX11-FLATSCR-NEXT: v_mov_b32_e32 v19, v2
+; GFX11-FLATSCR-NEXT: v_dual_mov_b32 v6, v13 :: v_dual_mov_b32 v19, v2
; GFX11-FLATSCR-NEXT: s_clause 0x4
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[33:36], off offset:768
; GFX11-FLATSCR-NEXT: scratch_store_b128 off, v[29:32], off offset:752
diff --git a/llvm/test/CodeGen/AMDGPU/soft-clause-exceeds-register-budget.ll b/llvm/test/CodeGen/AMDGPU/soft-clause-exceeds-register-budget.ll
index 71e4755..c90d788 100644
--- a/llvm/test/CodeGen/AMDGPU/soft-clause-exceeds-register-budget.ll
+++ b/llvm/test/CodeGen/AMDGPU/soft-clause-exceeds-register-budget.ll
@@ -3,9 +3,6 @@
define protected amdgpu_kernel void @excess_soft_clause_reg_pressure(ptr addrspace(4) %wei_ptr, ptr addrspace(1) %out_ptr, ptr addrspace(1) %in) {
; CHECK-LABEL: excess_soft_clause_reg_pressure:
; CHECK: BB0_1: ; %for.cond28.preheader
-; CHECK: s_load_dwordx16
-; CHECK-NEXT: s_load_dwordx16
-
; CHECK: global_load_dword
; CHECK-NEXT: global_load_dword
; CHECK-NEXT: global_load_dword
@@ -18,11 +15,23 @@ define protected amdgpu_kernel void @excess_soft_clause_reg_pressure(ptr addrspa
; CHECK-NOT: v_readlane_b32
; CHECK: s_load_dwordx16
+; CHECK-NEXT: s_load_dwordx16
+
+; CHECK-NOT: v_writelane_b32
+; CHECK-NOT: v_readlane_b32
+
; CHECK: s_load_dwordx16
+; CHECK-NEXT: s_load_dwordx16
+
+; CHECK-NOT: v_writelane_b32
+; CHECK-NOT: v_readlane_b32
+
; CHECK: s_load_dwordx16
+; CHECK-NEXT: s_load_dwordx16
; CHECK-NOT: v_writelane_b32
; CHECK-NOT: v_readlane_b32
+
entry:
%i = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%i2 = load i64, ptr addrspace(4) %i, align 8
diff --git a/llvm/test/CodeGen/AMDGPU/spill-agpr.ll b/llvm/test/CodeGen/AMDGPU/spill-agpr.ll
index da48af1..1a0f75e 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-agpr.ll
@@ -448,13 +448,13 @@ define amdgpu_kernel void @max_6regs_used_8a(ptr addrspace(1) %arg) #4 {
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: s_nop 0
; GFX90A-NEXT: v_mfma_f32_4x4x1f32 a[0:3], v2, v2, a[0:3]
-; GFX90A-NEXT: s_nop 4
-; GFX90A-NEXT: global_store_dwordx4 v0, a[0:3], s[2:3]
; GFX90A-NEXT: buffer_load_dword v2, off, s[8:11], 0 ; 4-byte Folded Reload
; GFX90A-NEXT: buffer_load_dword v3, off, s[8:11], 0 offset:4 ; 4-byte Folded Reload
; GFX90A-NEXT: buffer_load_dword v4, off, s[8:11], 0 offset:8 ; 4-byte Folded Reload
; GFX90A-NEXT: buffer_load_dword v5, off, s[8:11], 0 offset:12 ; 4-byte Folded Reload
-; GFX90A-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NEXT: s_nop 0
+; GFX90A-NEXT: global_store_dwordx4 v0, a[0:3], s[2:3]
+; GFX90A-NEXT: s_waitcnt vmcnt(1)
; GFX90A-NEXT: global_store_dwordx4 v[0:1], v[2:5], off
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: ;;#ASMSTART
diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
index 50056b6..b5474b8 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -10314,7 +10314,8 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX9-FLATSCR-NEXT: s_addc_u32 flat_scratch_hi, s9, 0
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2050
; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v4, 16
-; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:144
+; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(1)
; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill
; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:224
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2040
@@ -10327,12 +10328,10 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[19:22], v5, s[38:39] offset:192
; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[15:18], v5, s[38:39] offset:176
; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:160
-; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:144
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2020
-; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(1)
+; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0)
; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2070
-; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(1)
; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill
; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:128
; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:112
@@ -10344,7 +10343,9 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill
; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:96
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20b0
-; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[11:14], v5, s[38:39] offset:32
+; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:16
+; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(2)
; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill
; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[0:3], v5, s[38:39] offset:80
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x20a0
@@ -10358,10 +10359,7 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2080
; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0)
; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[0:3], s0 ; 16-byte Folded Spill
-; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[11:14], v5, s[38:39] offset:32
-; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[6:9], v5, s[38:39] offset:16
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2060
-; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0)
; GFX9-FLATSCR-NEXT: scratch_store_dwordx4 off, v[6:9], s0 ; 16-byte Folded Spill
; GFX9-FLATSCR-NEXT: global_load_dwordx4 v[7:10], v5, s[38:39]
; GFX9-FLATSCR-NEXT: v_mov_b32_e32 v6, 1
@@ -10468,13 +10466,13 @@ define amdgpu_kernel void @test_limited_sgpr(ptr addrspace(1) %out, ptr addrspac
; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:224
; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[6:9], off, s0 ; 16-byte Folded Reload
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2020
-; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload
+; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2070
+; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(1)
; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[6:9], s[36:37] offset:208
; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[19:22], s[36:37] offset:192
; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[15:18], s[36:37] offset:176
-; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload
-; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2070
-; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; GFX9-FLATSCR-NEXT: s_waitcnt vmcnt(3)
; GFX9-FLATSCR-NEXT: global_store_dwordx4 v5, v[0:3], s[36:37] offset:160
; GFX9-FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 ; 16-byte Folded Reload
; GFX9-FLATSCR-NEXT: s_movk_i32 s0, 0x2010
diff --git a/llvm/test/CodeGen/AMDGPU/stack-realign.ll b/llvm/test/CodeGen/AMDGPU/stack-realign.ll
index 9cb22da..802de80 100644
--- a/llvm/test/CodeGen/AMDGPU/stack-realign.ll
+++ b/llvm/test/CodeGen/AMDGPU/stack-realign.ll
@@ -295,9 +295,9 @@ define void @func_call_align1024_bp_gets_vgpr_spill(<32 x i32> %a, i32 %b) #0 {
; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:1028 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[18:19]
; GCN-NEXT: v_writelane_b32 v40, s16, 2
-; GCN-NEXT: v_mov_b32_e32 v32, 0
; GCN-NEXT: v_writelane_b32 v40, s34, 3
; GCN-NEXT: s_mov_b32 s34, s32
+; GCN-NEXT: v_mov_b32_e32 v32, 0
; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:1024
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s34
diff --git a/llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll b/llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll
index d80ec6b..8f8e2c0 100644
--- a/llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll
+++ b/llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll
@@ -655,7 +655,7 @@ bb:
br label %bb5
bb5: ; preds = %bb5.backedge, %bb
- %tmp4.i.sroa.0.0 = phi <9 x double> [ undef, %bb ], [ %tmp4.i.sroa.0.1, %bb5.backedge ]
+ %tmp4.i.sroa.0.0 = phi <9 x double> [ poison, %bb ], [ %tmp4.i.sroa.0.1, %bb5.backedge ]
%tmp14.1.i = load i32, ptr inttoptr (i64 128 to ptr), align 128
store i32 0, ptr addrspace(5) null, align 4
%tmp14.2.i = load i32, ptr inttoptr (i64 128 to ptr), align 128
diff --git a/llvm/test/CodeGen/AMDGPU/twoaddr-bundle.mir b/llvm/test/CodeGen/AMDGPU/twoaddr-bundle.mir
new file mode 100644
index 0000000..696962a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/twoaddr-bundle.mir
@@ -0,0 +1,57 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 %s --passes=two-address-instruction -verify-each -o - | FileCheck --check-prefixes=GCN %s
+
+# Exercise very basic handling of BUNDLE'd instructions by the two-address-instruction pass.
+
+# This test is an example where it is best to keep the two-address instruction
+# and resolve the tie with a COPY that is expected to be coalesced.
+---
+name: test_fmac_bundle
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: test_fmac_bundle
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY]], [[COPY1]], 0, implicit $exec
+ ; GCN-NEXT: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[V_ADD_U32_e64_]]
+ ; GCN-NEXT: BUNDLE implicit-def [[COPY2]], implicit [[DEF]], implicit [[DEF1]], implicit [[COPY2]](tied-def 0), implicit $mode, implicit $exec {
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e32 killed [[DEF]], killed [[DEF1]], killed [[COPY2]], implicit $mode, implicit $exec
+ ; GCN-NEXT: }
+ %10:vgpr_32 = COPY $vgpr0
+ %11:vgpr_32 = COPY $vgpr1
+ %2:vgpr_32 = V_ADD_U32_e64 %10, %11, 0, implicit $exec
+ %0:vgpr_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ BUNDLE implicit-def %3:vgpr_32, implicit %0, implicit %1, implicit killed %2(tied-def 0), implicit $mode, implicit $exec {
+ %3:vgpr_32 = V_FMAC_F32_e32 killed %0, killed %1, killed %2, implicit $mode, implicit $exec
+ }
+
+...
+
+# This test is an example where conversion to three-address form would be beneficial.
+---
+name: test_fmac_reuse_bundle
+body: |
+ bb.0:
+
+ ; GCN-LABEL: name: test_fmac_reuse_bundle
+ ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GCN-NEXT: BUNDLE implicit-def [[COPY1]], implicit [[DEF]], implicit [[DEF1]], implicit [[COPY1]](tied-def 0), implicit $mode, implicit $exec {
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e32 killed [[DEF]], killed [[DEF1]], killed [[COPY1]], implicit $mode, implicit $exec
+ ; GCN-NEXT: }
+ ; GCN-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY1]], [[COPY]], 0, implicit $exec
+ %2:vgpr_32 = COPY $vgpr0
+ %0:vgpr_32 = IMPLICIT_DEF
+ %1:vgpr_32 = IMPLICIT_DEF
+ BUNDLE implicit-def %3:vgpr_32, implicit %0, implicit %1, implicit %2(tied-def 0), implicit $mode, implicit $exec {
+ %3:vgpr_32 = V_FMAC_F32_e32 killed %0, killed %1, killed %2, implicit $mode, implicit $exec
+ }
+ %4:vgpr_32 = V_ADD_U32_e64 %3, %2, 0, implicit $exec
+
+...